1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* The purpose of the store merging pass is to combine multiple memory
22 stores of constant values, values loaded from memory or bitwise operations
23 on those to consecutive memory locations into fewer wider stores.
   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4  // concatenated immediates according to endianness.
   Or if we have a sequence of byte copies between two memory locations:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap it can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Similarly, a sequence of byte loads XORed with immediates:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap it can be transformed into a single 4-byte
   load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
49 The algorithm is applied to each basic block in three phases:
51 1) Scan through the basic block recording assignments to
52 destinations that can be expressed as a store to memory of a certain size
53 at a certain bit offset from expressions we can handle. For bit-fields
54 we also note the surrounding bit region, bits that could be stored in
55 a read-modify-write operation when storing the bit-field. Record store
56 chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
58 values get used subsequently).
59 These stores can be a result of structure element initializers, array stores
60 etc. A store_immediate_info object is recorded for every such store.
61 Record as many such assignments to a single base as possible until a
62 statement that interferes with the store sequence is encountered.
63 Each store has up to 2 operands, which can be an immediate constant
64 or a memory load, from which the value to be stored can be computed.
65 At most one of the operands can be a constant. The operands are recorded
   in the store_operand_info struct.
68 2) Analyze the chain of stores recorded in phase 1) (i.e. the vector of
69 store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
71 require the stores to be contiguous, just their surrounding bit regions
72 have to be contiguous. If the expression being stored is different
   between adjacent stores, such as one store storing a constant and the
   following one storing a value loaded from memory, or if the loaded memory
75 objects are not adjacent, a new merged_store_group is created as well.
   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
84 This phase would produce two merged_store_group objects, one recording the
85 two bytes stored in the memory region [p : p + 1] and another
86 recording the four bytes stored in the memory region [p + 3 : p + 6].
88 3) The merged_store_group objects produced in phase 2) are processed
89 to generate the sequence of wider stores that set the contiguous memory
90 regions to the sequence of bytes that correspond to it. This may emit
91 multiple stores per store group to handle contiguous stores that are not
92 of a size that is a power of 2. For example it can try to emit a 40-bit
93 store as a 32-bit store followed by an 8-bit store.
   We try to emit stores as wide as we can while respecting STRICT_ALIGNMENT or
95 TARGET_SLOW_UNALIGNED_ACCESS rules.
97 Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] (16-bit) := 0x1234;
   [p + 2B] (16-bit) := 0x5678;
   [p + 4B] (8-bit)  := 0xab;
   [p + 5B] (8-bit)  := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|
114 To merge these into a single 48-bit merged value 'val' in phase 2)
115 on little-endian we insert stores to higher (consecutive) bitpositions
116 into the most significant bits of the merged value.
117 The final merged value would be: 0xcdab56781234
119 For big-endian we insert stores to higher bitpositions into the least
120 significant bits of the merged value.
121 The final merged value would be: 0x12345678abcd
123 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
124 followed by a 16-bit store. Again, we must consider endianness when
125 breaking down the 48-bit value 'val' computed above.
126 For little endian we emit:
127 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
128 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
130 Whereas for big-endian we emit:
131 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
132 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
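/* As a rough source-level illustration of what the pass does (a hypothetical
   user function, not something taken from this file):

     void
     set_bytes (unsigned char *p)
     {
       p[0] = 1;
       p[1] = 2;
       p[2] = 3;
       p[3] = 4;
     }

   can, on targets where such an access is cheap enough, be emitted as one
   4-byte store of the four immediates concatenated according to the target
   endianness instead of four 1-byte stores.  */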
136 #include "coretypes.h"
140 #include "builtins.h"
141 #include "fold-const.h"
142 #include "tree-pass.h"
144 #include "gimple-pretty-print.h"
148 #include "print-tree.h"
149 #include "tree-hash-traits.h"
150 #include "gimple-iterator.h"
151 #include "gimplify.h"
152 #include "stor-layout.h"
154 #include "tree-cfg.h"
157 #include "gimplify-me.h"
159 #include "expr.h" /* For get_bit_range. */
160 #include "optabs-tree.h"
161 #include "selftest.h"
163 /* The maximum size (in bits) of the stores this pass should generate. */
164 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
165 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
167 /* Limit to bound the number of aliasing checks for loads with the same
168 vuse as the corresponding store. */
169 #define MAX_STORE_ALIAS_CHECKS 64
static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;
185 /* A symbolic number structure is used to detect byte permutation and selection
186 patterns of a source. To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:
190 0 - target byte has the value 0
191 FF - target byte has an unknown value (eg. due to sign extension)
192 1..size - marker value is the byte index in the source (0 for lsb).
194 To detect permutations on memory sources (arrays and structures), a symbolic
195 number is also associated:
196 - a base address BASE_ADDR and an OFFSET giving the address of the source;
197 - a range which gives the difference between the highest and lowest accessed
198 memory location to make such a symbolic number;
199 - the address SRC of the source element of lowest address as a convenience
200 to easily get BASE_ADDR + offset + lowest bytepos;
   - the number of expressions N_OPS bitwise ORed together to represent the
     approximate cost of the computation.
204 Note 1: the range is different from size as size reflects the size of the
205 type of the current expression. For instance, for an array char a[],
206 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.
210 Note 2: for non-memory sources, range holds the same value as size.
212 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  HOST_WIDE_INT bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};
227 #define BITS_PER_MARKER 8
228 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
229 #define MARKER_BYTE_UNKNOWN MARKER_MASK
230 #define HEAD_MARKER(n, size) \
231 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
233 /* The number which the find_bswap_or_nop_1 result should match in
234 order to have a nop. The number is masked according to the size of
235 the symbolic number before using it. */
236 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
237 (uint64_t)0x08070605 << 32 | 0x04030201)
239 /* The number which the find_bswap_or_nop_1 result should match in
240 order to have a byte swap. The number is masked according to the
241 size of the symbolic number before using it. */
242 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
243 (uint64_t)0x01020304 << 32 | 0x05060708)
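/* A worked example (illustrative only): reduced to a 4-byte symbolic number,
   the nop pattern is 0x04030201, i.e. byte i of the result comes from byte i
   of the source (the identity permutation), while the byte swap pattern is
   0x01020304, i.e. the source bytes appear in reversed order.  */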
245 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
246 number N. Return false if the requested operation is not permitted
247 on a symbolic number. */
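/* For instance (illustrative only): starting from the 4-byte identity number
   0x04030201, a right shift by 8 bits (one byte) gives 0x00040302 for an
   unsigned type; for a signed type whose head marker is set, the vacated top
   marker becomes MARKER_BYTE_UNKNOWN instead, giving 0xff040302.  */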
static bool
do_shift_rotate (enum tree_code code,
                 struct symbolic_number *n,
                 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
        for (i = 0; i < count / BITS_PER_MARKER; i++)
          n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
                  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}
/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

static bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
348 /* Check if STMT might be a byte swap or a nop from a memory source and returns
349 the answer. If so, REF is that memory source and the base of the memory area
350 accessed and the offset of the access from that base are recorded in N. */
353 find_bswap_or_nop_load (gimple
*stmt
, tree ref
, struct symbolic_number
*n
)
355 /* Leaf node is an array or component ref. Memorize its base and
356 offset from base to compare to other such leaf node. */
357 HOST_WIDE_INT bitsize
, bitpos
;
359 int unsignedp
, reversep
, volatilep
;
360 tree offset
, base_addr
;
362 /* Not prepared to handle PDP endian. */
363 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
366 if (!gimple_assign_load_p (stmt
) || gimple_has_volatile_ops (stmt
))
369 base_addr
= get_inner_reference (ref
, &bitsize
, &bitpos
, &offset
, &mode
,
370 &unsignedp
, &reversep
, &volatilep
);
372 if (TREE_CODE (base_addr
) == TARGET_MEM_REF
)
373 /* Do not rewrite TARGET_MEM_REF. */
375 else if (TREE_CODE (base_addr
) == MEM_REF
)
377 offset_int bit_offset
= 0;
378 tree off
= TREE_OPERAND (base_addr
, 1);
380 if (!integer_zerop (off
))
382 offset_int boff
, coff
= mem_ref_offset (base_addr
);
383 boff
= coff
<< LOG2_BITS_PER_UNIT
;
387 base_addr
= TREE_OPERAND (base_addr
, 0);
389 /* Avoid returning a negative bitpos as this may wreak havoc later. */
390 if (wi::neg_p (bit_offset
))
392 offset_int mask
= wi::mask
<offset_int
> (LOG2_BITS_PER_UNIT
, false);
393 offset_int tem
= wi::bit_and_not (bit_offset
, mask
);
394 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
	     Subtract it from BIT_OFFSET and add it (scaled) to OFFSET.  */
397 tem
>>= LOG2_BITS_PER_UNIT
;
399 offset
= size_binop (PLUS_EXPR
, offset
,
400 wide_int_to_tree (sizetype
, tem
));
402 offset
= wide_int_to_tree (sizetype
, tem
);
405 bitpos
+= bit_offset
.to_shwi ();
408 base_addr
= build_fold_addr_expr (base_addr
);
410 if (bitpos
% BITS_PER_UNIT
)
412 if (bitsize
% BITS_PER_UNIT
)
417 if (!init_symbolic_number (n
, ref
))
419 n
->base_addr
= base_addr
;
421 n
->bytepos
= bitpos
/ BITS_PER_UNIT
;
422 n
->alias_set
= reference_alias_ptr_type (ref
);
423 n
->vuse
= gimple_vuse (stmt
);
/* Compute the symbolic number N representing the result of a bitwise OR on
   two symbolic numbers N1 and N2 whose source statements are respectively
429 SOURCE_STMT1 and SOURCE_STMT2. */
432 perform_symbolic_merge (gimple
*source_stmt1
, struct symbolic_number
*n1
,
433 gimple
*source_stmt2
, struct symbolic_number
*n2
,
434 struct symbolic_number
*n
)
439 struct symbolic_number
*n_start
;
441 tree rhs1
= gimple_assign_rhs1 (source_stmt1
);
442 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
443 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
444 rhs1
= TREE_OPERAND (rhs1
, 0);
445 tree rhs2
= gimple_assign_rhs1 (source_stmt2
);
446 if (TREE_CODE (rhs2
) == BIT_FIELD_REF
447 && TREE_CODE (TREE_OPERAND (rhs2
, 0)) == SSA_NAME
)
448 rhs2
= TREE_OPERAND (rhs2
, 0);
  /* Sources are different, cancel bswap if they are not memory locations with
451 the same base (array, structure, ...). */
455 HOST_WIDE_INT start_sub
, end_sub
, end1
, end2
, end
;
456 struct symbolic_number
*toinc_n_ptr
, *n_end
;
457 basic_block bb1
, bb2
;
459 if (!n1
->base_addr
|| !n2
->base_addr
460 || !operand_equal_p (n1
->base_addr
, n2
->base_addr
, 0))
463 if (!n1
->offset
!= !n2
->offset
464 || (n1
->offset
&& !operand_equal_p (n1
->offset
, n2
->offset
, 0)))
467 if (n1
->bytepos
< n2
->bytepos
)
470 start_sub
= n2
->bytepos
- n1
->bytepos
;
475 start_sub
= n1
->bytepos
- n2
->bytepos
;
478 bb1
= gimple_bb (source_stmt1
);
479 bb2
= gimple_bb (source_stmt2
);
480 if (dominated_by_p (CDI_DOMINATORS
, bb1
, bb2
))
481 source_stmt
= source_stmt1
;
483 source_stmt
= source_stmt2
;
485 /* Find the highest address at which a load is performed and
486 compute related info. */
487 end1
= n1
->bytepos
+ (n1
->range
- 1);
488 end2
= n2
->bytepos
+ (n2
->range
- 1);
492 end_sub
= end2
- end1
;
497 end_sub
= end1
- end2
;
499 n_end
= (end2
> end1
) ? n2
: n1
;
501 /* Find symbolic number whose lsb is the most significant. */
502 if (BYTES_BIG_ENDIAN
)
503 toinc_n_ptr
= (n_end
== n1
) ? n2
: n1
;
505 toinc_n_ptr
= (n_start
== n1
) ? n2
: n1
;
507 n
->range
= end
- n_start
->bytepos
+ 1;
509 /* Check that the range of memory covered can be represented by
510 a symbolic number. */
511 if (n
->range
> 64 / BITS_PER_MARKER
)
514 /* Reinterpret byte marks in symbolic number holding the value of
515 bigger weight according to target endianness. */
516 inc
= BYTES_BIG_ENDIAN
? end_sub
: start_sub
;
517 size
= TYPE_PRECISION (n1
->type
) / BITS_PER_UNIT
;
518 for (i
= 0; i
< size
; i
++, inc
<<= BITS_PER_MARKER
)
521 = (toinc_n_ptr
->n
>> (i
* BITS_PER_MARKER
)) & MARKER_MASK
;
522 if (marker
&& marker
!= MARKER_BYTE_UNKNOWN
)
523 toinc_n_ptr
->n
+= inc
;
528 n
->range
= n1
->range
;
530 source_stmt
= source_stmt1
;
534 || alias_ptr_types_compatible_p (n1
->alias_set
, n2
->alias_set
))
535 n
->alias_set
= n1
->alias_set
;
537 n
->alias_set
= ptr_type_node
;
538 n
->vuse
= n_start
->vuse
;
539 n
->base_addr
= n_start
->base_addr
;
540 n
->offset
= n_start
->offset
;
541 n
->src
= n_start
->src
;
542 n
->bytepos
= n_start
->bytepos
;
543 n
->type
= n_start
->type
;
544 size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
546 for (i
= 0, mask
= MARKER_MASK
; i
< size
; i
++, mask
<<= BITS_PER_MARKER
)
548 uint64_t masked1
, masked2
;
550 masked1
= n1
->n
& mask
;
551 masked2
= n2
->n
& mask
;
552 if (masked1
&& masked2
&& masked1
!= masked2
)
555 n
->n
= n1
->n
| n2
->n
;
556 n
->n_ops
= n1
->n_ops
+ n2
->n_ops
;
561 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
562 the operation given by the rhs of STMT on the result. If the operation
563 could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */
568 find_bswap_or_nop_1 (gimple
*stmt
, struct symbolic_number
*n
, int limit
)
571 tree rhs1
, rhs2
= NULL
;
572 gimple
*rhs1_stmt
, *rhs2_stmt
, *source_stmt1
;
573 enum gimple_rhs_class rhs_class
;
575 if (!limit
|| !is_gimple_assign (stmt
))
578 rhs1
= gimple_assign_rhs1 (stmt
);
580 if (find_bswap_or_nop_load (stmt
, rhs1
, n
))
583 /* Handle BIT_FIELD_REF. */
584 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
585 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
587 unsigned HOST_WIDE_INT bitsize
= tree_to_uhwi (TREE_OPERAND (rhs1
, 1));
588 unsigned HOST_WIDE_INT bitpos
= tree_to_uhwi (TREE_OPERAND (rhs1
, 2));
589 if (bitpos
% BITS_PER_UNIT
== 0
590 && bitsize
% BITS_PER_UNIT
== 0
591 && init_symbolic_number (n
, TREE_OPERAND (rhs1
, 0)))
593 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
594 if (BYTES_BIG_ENDIAN
)
595 bitpos
= TYPE_PRECISION (n
->type
) - bitpos
- bitsize
;
598 if (!do_shift_rotate (RSHIFT_EXPR
, n
, bitpos
))
603 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
604 for (unsigned i
= 0; i
< bitsize
/ BITS_PER_UNIT
;
605 i
++, tmp
<<= BITS_PER_UNIT
)
606 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
610 n
->type
= TREE_TYPE (rhs1
);
612 n
->range
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
614 return verify_symbolic_number_p (n
, stmt
) ? stmt
: NULL
;
620 if (TREE_CODE (rhs1
) != SSA_NAME
)
623 code
= gimple_assign_rhs_code (stmt
);
624 rhs_class
= gimple_assign_rhs_class (stmt
);
625 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
627 if (rhs_class
== GIMPLE_BINARY_RHS
)
628 rhs2
= gimple_assign_rhs2 (stmt
);
  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */
633 if (rhs_class
== GIMPLE_UNARY_RHS
634 || (rhs_class
== GIMPLE_BINARY_RHS
635 && TREE_CODE (rhs2
) == INTEGER_CST
))
637 if (code
!= BIT_AND_EXPR
638 && code
!= LSHIFT_EXPR
639 && code
!= RSHIFT_EXPR
640 && code
!= LROTATE_EXPR
641 && code
!= RROTATE_EXPR
642 && !CONVERT_EXPR_CODE_P (code
))
645 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, n
, limit
- 1);
647 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
648 we have to initialize the symbolic number. */
651 if (gimple_assign_load_p (stmt
)
652 || !init_symbolic_number (n
, rhs1
))
661 int i
, size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
662 uint64_t val
= int_cst_value (rhs2
), mask
= 0;
663 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
665 /* Only constants masking full bytes are allowed. */
666 for (i
= 0; i
< size
; i
++, tmp
<<= BITS_PER_UNIT
)
667 if ((val
& tmp
) != 0 && (val
& tmp
) != tmp
)
670 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
679 if (!do_shift_rotate (code
, n
, (int) TREE_INT_CST_LOW (rhs2
)))
684 int i
, type_size
, old_type_size
;
687 type
= gimple_expr_type (stmt
);
688 type_size
= TYPE_PRECISION (type
);
689 if (type_size
% BITS_PER_UNIT
!= 0)
691 type_size
/= BITS_PER_UNIT
;
692 if (type_size
> 64 / BITS_PER_MARKER
)
695 /* Sign extension: result is dependent on the value. */
696 old_type_size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
697 if (!TYPE_UNSIGNED (n
->type
) && type_size
> old_type_size
698 && HEAD_MARKER (n
->n
, old_type_size
))
699 for (i
= 0; i
< type_size
- old_type_size
; i
++)
700 n
->n
|= (uint64_t) MARKER_BYTE_UNKNOWN
701 << ((type_size
- 1 - i
) * BITS_PER_MARKER
);
703 if (type_size
< 64 / BITS_PER_MARKER
)
705 /* If STMT casts to a smaller type mask out the bits not
706 belonging to the target type. */
707 n
->n
&= ((uint64_t) 1 << (type_size
* BITS_PER_MARKER
)) - 1;
711 n
->range
= type_size
;
717 return verify_symbolic_number_p (n
, stmt
) ? source_stmt1
: NULL
;
720 /* Handle binary rhs. */
722 if (rhs_class
== GIMPLE_BINARY_RHS
)
724 struct symbolic_number n1
, n2
;
725 gimple
*source_stmt
, *source_stmt2
;
727 if (code
!= BIT_IOR_EXPR
)
730 if (TREE_CODE (rhs2
) != SSA_NAME
)
733 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
738 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, &n1
, limit
- 1);
743 source_stmt2
= find_bswap_or_nop_1 (rhs2_stmt
, &n2
, limit
- 1);
748 if (TYPE_PRECISION (n1
.type
) != TYPE_PRECISION (n2
.type
))
751 if (n1
.vuse
!= n2
.vuse
)
755 = perform_symbolic_merge (source_stmt1
, &n1
, source_stmt2
, &n2
, n
);
760 if (!verify_symbolic_number_p (n
, stmt
))
772 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
773 *CMPXCHG, *CMPNOP and adjust *N. */
776 find_bswap_or_nop_finalize (struct symbolic_number
*n
, uint64_t *cmpxchg
,
782 /* The number which the find_bswap_or_nop_1 result should match in order
783 to have a full byte swap. The number is shifted to the right
784 according to the size of the symbolic number before using it. */
788 /* Find real size of result (highest non-zero byte). */
790 for (tmpn
= n
->n
, rsize
= 0; tmpn
; tmpn
>>= BITS_PER_MARKER
, rsize
++);
  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
796 if (n
->range
< (int) sizeof (int64_t))
798 mask
= ((uint64_t) 1 << (n
->range
* BITS_PER_MARKER
)) - 1;
799 *cmpxchg
>>= (64 / BITS_PER_MARKER
- n
->range
) * BITS_PER_MARKER
;
803 /* Zero out the bits corresponding to unused bytes in the result of the
804 gimple expression. */
805 if (rsize
< n
->range
)
807 if (BYTES_BIG_ENDIAN
)
809 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
811 *cmpnop
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
815 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
816 *cmpxchg
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
822 n
->range
*= BITS_PER_UNIT
;
825 /* Check if STMT completes a bswap implementation or a read in a given
826 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
827 accordingly. It also sets N to represent the kind of operations
828 performed: size of the resulting expression and whether it works on
829 a memory source, and if so alias-set and vuse. At last, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */
834 find_bswap_or_nop (gimple
*stmt
, struct symbolic_number
*n
, bool *bswap
)
836 /* The last parameter determines the depth search limit. It usually
837 correlates directly to the number n of bytes to be touched. We
838 increase that number by log2(n) + 1 here in order to also
839 cover signed -> unsigned conversions of the src operand as can be seen
840 in libgcc, and for initial shift/and operation of the src operand. */
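  /* For example, for a 64-bit (8 byte) expression this gives a depth limit
     of 8 + 1 + ceil_log2 (8) = 12 statements.  */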
841 int limit
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt
)));
842 limit
+= 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT
) limit
);
843 gimple
*ins_stmt
= find_bswap_or_nop_1 (stmt
, n
, limit
);
848 uint64_t cmpxchg
, cmpnop
;
849 find_bswap_or_nop_finalize (n
, &cmpxchg
, &cmpnop
);
  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
856 else if (n
->n
== cmpxchg
)
861 /* Useless bit manipulation performed by code. */
862 if (!n
->base_addr
&& n
->n
== cmpnop
&& n
->n_ops
== 1)
868 const pass_data pass_data_optimize_bswap
=
870 GIMPLE_PASS
, /* type */
872 OPTGROUP_NONE
, /* optinfo_flags */
874 PROP_ssa
, /* properties_required */
875 0, /* properties_provided */
876 0, /* properties_destroyed */
877 0, /* todo_flags_start */
878 0, /* todo_flags_finish */
881 class pass_optimize_bswap
: public gimple_opt_pass
884 pass_optimize_bswap (gcc::context
*ctxt
)
885 : gimple_opt_pass (pass_data_optimize_bswap
, ctxt
)
888 /* opt_pass methods: */
889 virtual bool gate (function
*)
891 return flag_expensive_optimizations
&& optimize
&& BITS_PER_UNIT
== 8;
894 virtual unsigned int execute (function
*);
896 }; // class pass_optimize_bswap
898 /* Perform the bswap optimization: replace the expression computed in the rhs
899 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
900 bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias set of the
   load to perform are also given in N while the builtin bswap call is given
   in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of the
   load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE which can lead to gsi_stmt (GSI) changing basic block.  */
914 bswap_replace (gimple_stmt_iterator gsi
, gimple
*ins_stmt
, tree fndecl
,
915 tree bswap_type
, tree load_type
, struct symbolic_number
*n
,
918 tree src
, tmp
, tgt
= NULL_TREE
;
921 gimple
*cur_stmt
= gsi_stmt (gsi
);
924 tgt
= gimple_assign_lhs (cur_stmt
);
926 /* Need to load the value from memory first. */
929 gimple_stmt_iterator gsi_ins
= gsi
;
931 gsi_ins
= gsi_for_stmt (ins_stmt
);
932 tree addr_expr
, addr_tmp
, val_expr
, val_tmp
;
933 tree load_offset_ptr
, aligned_load_type
;
935 unsigned align
= get_object_alignment (src
);
936 HOST_WIDE_INT load_offset
= 0;
940 basic_block ins_bb
= gimple_bb (ins_stmt
);
941 basic_block cur_bb
= gimple_bb (cur_stmt
);
942 if (!dominated_by_p (CDI_DOMINATORS
, cur_bb
, ins_bb
))
	  /* Move cur_stmt just before one of the loads of the original
	     to ensure it has the same VUSE.  See PR61517 for what could
	     go wrong.  */
948 if (gimple_bb (cur_stmt
) != gimple_bb (ins_stmt
))
949 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt
));
950 gsi_move_before (&gsi
, &gsi_ins
);
951 gsi
= gsi_for_stmt (cur_stmt
);
      /* Compute address to load from and cast according to the size
	 of load_type.  */
958 addr_expr
= build_fold_addr_expr (src
);
959 if (is_gimple_mem_ref_addr (addr_expr
))
960 addr_tmp
= unshare_expr (addr_expr
);
963 addr_tmp
= unshare_expr (n
->base_addr
);
964 if (!is_gimple_mem_ref_addr (addr_tmp
))
965 addr_tmp
= force_gimple_operand_gsi_1 (&gsi
, addr_tmp
,
966 is_gimple_mem_ref_addr
,
969 load_offset
= n
->bytepos
;
973 = force_gimple_operand_gsi (&gsi
, unshare_expr (n
->offset
),
974 true, NULL_TREE
, true,
977 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp
)),
978 POINTER_PLUS_EXPR
, addr_tmp
, off
);
979 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
980 addr_tmp
= gimple_assign_lhs (stmt
);
984 /* Perform the load. */
985 aligned_load_type
= load_type
;
986 if (align
< TYPE_ALIGN (load_type
))
987 aligned_load_type
= build_aligned_type (load_type
, align
);
988 load_offset_ptr
= build_int_cst (n
->alias_set
, load_offset
);
989 val_expr
= fold_build2 (MEM_REF
, aligned_load_type
, addr_tmp
,
995 nop_stats
.found_16bit
++;
996 else if (n
->range
== 32)
997 nop_stats
.found_32bit
++;
1000 gcc_assert (n
->range
== 64);
1001 nop_stats
.found_64bit
++;
1004 /* Convert the result of load if necessary. */
1005 if (tgt
&& !useless_type_conversion_p (TREE_TYPE (tgt
), load_type
))
1007 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
,
1009 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
1010 gimple_set_vuse (load_stmt
, n
->vuse
);
1011 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1012 gimple_assign_set_rhs_with_ops (&gsi
, NOP_EXPR
, val_tmp
);
1013 update_stmt (cur_stmt
);
1017 gimple_assign_set_rhs_with_ops (&gsi
, MEM_REF
, val_expr
);
1018 gimple_set_vuse (cur_stmt
, n
->vuse
);
1019 update_stmt (cur_stmt
);
1023 tgt
= make_ssa_name (load_type
);
1024 cur_stmt
= gimple_build_assign (tgt
, MEM_REF
, val_expr
);
1025 gimple_set_vuse (cur_stmt
, n
->vuse
);
1026 gsi_insert_before (&gsi
, cur_stmt
, GSI_SAME_STMT
);
1032 "%d bit load in target endianness found at: ",
1034 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1040 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
, "load_dst");
1041 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
1042 gimple_set_vuse (load_stmt
, n
->vuse
);
1043 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1050 if (tgt
&& !useless_type_conversion_p (TREE_TYPE (tgt
), TREE_TYPE (src
)))
1052 if (!is_gimple_val (src
))
1054 g
= gimple_build_assign (tgt
, NOP_EXPR
, src
);
1057 g
= gimple_build_assign (tgt
, src
);
1061 nop_stats
.found_16bit
++;
1062 else if (n
->range
== 32)
1063 nop_stats
.found_32bit
++;
1066 gcc_assert (n
->range
== 64);
1067 nop_stats
.found_64bit
++;
1072 "%d bit reshuffle in target endianness found at: ",
1075 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1078 print_generic_expr (dump_file
, tgt
, 0);
1079 fprintf (dump_file
, "\n");
1083 gsi_replace (&gsi
, g
, true);
1086 else if (TREE_CODE (src
) == BIT_FIELD_REF
)
1087 src
= TREE_OPERAND (src
, 0);
1090 bswap_stats
.found_16bit
++;
1091 else if (n
->range
== 32)
1092 bswap_stats
.found_32bit
++;
1095 gcc_assert (n
->range
== 64);
1096 bswap_stats
.found_64bit
++;
1101 /* Convert the src expression if necessary. */
1102 if (!useless_type_conversion_p (TREE_TYPE (tmp
), bswap_type
))
1104 gimple
*convert_stmt
;
1106 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapsrc");
1107 convert_stmt
= gimple_build_assign (tmp
, NOP_EXPR
, src
);
1108 gsi_insert_before (&gsi
, convert_stmt
, GSI_SAME_STMT
);
  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit values
     are handled this way because rotating a 2N-bit value by N bits is generally
     not equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
1114 gives 0x03040102 while a bswap for that value is 0x04030201. */
1115 if (bswap
&& n
->range
== 16)
1117 tree count
= build_int_cst (NULL
, BITS_PER_UNIT
);
1118 src
= fold_build2 (LROTATE_EXPR
, bswap_type
, tmp
, count
);
1119 bswap_stmt
= gimple_build_assign (NULL
, src
);
1122 bswap_stmt
= gimple_build_call (fndecl
, 1, tmp
);
1124 if (tgt
== NULL_TREE
)
1125 tgt
= make_ssa_name (bswap_type
);
1128 /* Convert the result if necessary. */
1129 if (!useless_type_conversion_p (TREE_TYPE (tgt
), bswap_type
))
1131 gimple
*convert_stmt
;
1133 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapdst");
1134 convert_stmt
= gimple_build_assign (tgt
, NOP_EXPR
, tmp
);
1135 gsi_insert_after (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1138 gimple_set_lhs (bswap_stmt
, tmp
);
1142 fprintf (dump_file
, "%d bit bswap implementation found at: ",
1145 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1148 print_generic_expr (dump_file
, tgt
, 0);
1149 fprintf (dump_file
, "\n");
1155 gsi_insert_after (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
1156 gsi_remove (&gsi
, true);
1159 gsi_insert_before (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or
   a simple load according to the target endianness.  */
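/* As a rough example of the kind of code this pass recognizes (a hypothetical
   user function, not something from this file, assuming a 32-bit unsigned int):

     unsigned int
     my_bswap32 (unsigned int x)
     {
       return ((x & 0x000000ff) << 24)
              | ((x & 0x0000ff00) << 8)
              | ((x & 0x00ff0000) >> 8)
              | ((x & 0xff000000) >> 24);
     }

   is rewritten into a single call to __builtin_bswap32 when the target
   provides a bswap pattern for SImode.  */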
1169 pass_optimize_bswap::execute (function
*fun
)
1172 bool bswap32_p
, bswap64_p
;
1173 bool changed
= false;
1174 tree bswap32_type
= NULL_TREE
, bswap64_type
= NULL_TREE
;
1176 bswap32_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
1177 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
);
1178 bswap64_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
1179 && (optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
1180 || (bswap32_p
&& word_mode
== SImode
)));
1182 /* Determine the argument type of the builtins. The code later on
1183 assumes that the return and argument type are the same. */
1186 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1187 bswap32_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1192 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1193 bswap64_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1196 memset (&nop_stats
, 0, sizeof (nop_stats
));
1197 memset (&bswap_stats
, 0, sizeof (bswap_stats
));
1198 calculate_dominance_info (CDI_DOMINATORS
);
1200 FOR_EACH_BB_FN (bb
, fun
)
1202 gimple_stmt_iterator gsi
;
1204 /* We do a reverse scan for bswap patterns to make sure we get the
1205 widest match. As bswap pattern matching doesn't handle previously
1206 inserted smaller bswap replacements as sub-patterns, the wider
1207 variant wouldn't be detected. */
1208 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
);)
1210 gimple
*ins_stmt
, *cur_stmt
= gsi_stmt (gsi
);
1211 tree fndecl
= NULL_TREE
, bswap_type
= NULL_TREE
, load_type
;
1212 enum tree_code code
;
1213 struct symbolic_number n
;
1216 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1217 might be moved to a different basic block by bswap_replace and gsi
	 must not point to it if that's the case.  Moving the gsi_prev
	 there makes sure that gsi points to the statement previous to
1220 cur_stmt while still making sure that all statements are
1221 considered in this basic block. */
1224 if (!is_gimple_assign (cur_stmt
))
1227 code
= gimple_assign_rhs_code (cur_stmt
);
1232 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt
))
1233 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt
))
1243 ins_stmt
= find_bswap_or_nop (cur_stmt
, &n
, &bswap
);
1251 /* Already in canonical form, nothing to do. */
1252 if (code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
)
1254 load_type
= bswap_type
= uint16_type_node
;
1257 load_type
= uint32_type_node
;
1260 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1261 bswap_type
= bswap32_type
;
1265 load_type
= uint64_type_node
;
1268 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1269 bswap_type
= bswap64_type
;
1276 if (bswap
&& !fndecl
&& n
.range
!= 16)
1279 if (bswap_replace (gsi_for_stmt (cur_stmt
), ins_stmt
, fndecl
,
1280 bswap_type
, load_type
, &n
, bswap
))
1285 statistics_counter_event (fun
, "16-bit nop implementations found",
1286 nop_stats
.found_16bit
);
1287 statistics_counter_event (fun
, "32-bit nop implementations found",
1288 nop_stats
.found_32bit
);
1289 statistics_counter_event (fun
, "64-bit nop implementations found",
1290 nop_stats
.found_64bit
);
1291 statistics_counter_event (fun
, "16-bit bswap implementations found",
1292 bswap_stats
.found_16bit
);
1293 statistics_counter_event (fun
, "32-bit bswap implementations found",
1294 bswap_stats
.found_32bit
);
1295 statistics_counter_event (fun
, "64-bit bswap implementations found",
1296 bswap_stats
.found_64bit
);
1298 return (changed
? TODO_update_ssa
: 0);
1304 make_pass_optimize_bswap (gcc::context
*ctxt
)
1306 return new pass_optimize_bswap (ctxt
);
1311 /* Struct recording one operand for the store, which is either a constant,
1312 then VAL represents the constant and all the other fields are zero,
1313 or a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1314 and the other fields also reflect the memory load. */
1316 struct store_operand_info
1320 unsigned HOST_WIDE_INT bitsize
;
1321 unsigned HOST_WIDE_INT bitpos
;
1322 unsigned HOST_WIDE_INT bitregion_start
;
1323 unsigned HOST_WIDE_INT bitregion_end
;
1326 store_operand_info ();
1329 store_operand_info::store_operand_info ()
1330 : val (NULL_TREE
), base_addr (NULL_TREE
), bitsize (0), bitpos (0),
1331 bitregion_start (0), bitregion_end (0), stmt (NULL
), bit_not_p (false)
1335 /* Struct recording the information about a single store of an immediate
1336 to memory. These are created in the first phase and coalesced into
1337 merged_store_group objects in the second phase. */
1339 struct store_immediate_info
1341 unsigned HOST_WIDE_INT bitsize
;
1342 unsigned HOST_WIDE_INT bitpos
;
1343 unsigned HOST_WIDE_INT bitregion_start
;
1344 /* This is one past the last bit of the bit region. */
1345 unsigned HOST_WIDE_INT bitregion_end
;
1348 /* INTEGER_CST for constant stores, MEM_REF for memory copy or
1349 BIT_*_EXPR for logical bitwise operation.
1350 LROTATE_EXPR if it can be only bswap optimized and
1351 ops are not really meaningful.
1352 NOP_EXPR if bswap optimization detected identity, ops
1353 are not meaningful. */
1354 enum tree_code rhs_code
;
1355 /* Two fields for bswap optimization purposes. */
1356 struct symbolic_number n
;
1358 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1360 /* True if ops have been swapped and thus ops[1] represents
1361 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1363 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1364 just the first one. */
1365 store_operand_info ops
[2];
1366 store_immediate_info (unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1367 unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1368 gimple
*, unsigned int, enum tree_code
,
1369 struct symbolic_number
&, gimple
*, bool,
1370 const store_operand_info
&,
1371 const store_operand_info
&);
1374 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs
,
1375 unsigned HOST_WIDE_INT bp
,
1376 unsigned HOST_WIDE_INT brs
,
1377 unsigned HOST_WIDE_INT bre
,
1380 enum tree_code rhscode
,
1381 struct symbolic_number
&nr
,
1384 const store_operand_info
&op0r
,
1385 const store_operand_info
&op1r
)
1386 : bitsize (bs
), bitpos (bp
), bitregion_start (brs
), bitregion_end (bre
),
1387 stmt (st
), order (ord
), rhs_code (rhscode
), n (nr
),
1388 ins_stmt (ins_stmtp
), bit_not_p (bitnotp
), ops_swapped_p (false)
1389 #if __cplusplus >= 201103L
1390 , ops
{ op0r
, op1r
}
1400 /* Struct representing a group of stores to contiguous memory locations.
1401 These are produced by the second phase (coalescing) and consumed in the
1402 third phase that outputs the widened stores. */
1404 struct merged_store_group
1406 unsigned HOST_WIDE_INT start
;
1407 unsigned HOST_WIDE_INT width
;
1408 unsigned HOST_WIDE_INT bitregion_start
;
1409 unsigned HOST_WIDE_INT bitregion_end
;
1410 /* The size of the allocated memory for val and mask. */
1411 unsigned HOST_WIDE_INT buf_size
;
1412 unsigned HOST_WIDE_INT align_base
;
1413 unsigned HOST_WIDE_INT load_align_base
[2];
1416 unsigned int load_align
[2];
1417 unsigned int first_order
;
1418 unsigned int last_order
;
1420 auto_vec
<store_immediate_info
*> stores
;
1421 /* We record the first and last original statements in the sequence because
1422 we'll need their vuse/vdef and replacement position. It's easier to keep
1423 track of them separately as 'stores' is reordered by apply_stores. */
1427 unsigned char *mask
;
1429 merged_store_group (store_immediate_info
*);
1430 ~merged_store_group ();
1431 void merge_into (store_immediate_info
*);
1432 void merge_overlapping (store_immediate_info
*);
1433 bool apply_stores ();
1435 void do_merge (store_immediate_info
*);
1438 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1441 dump_char_array (FILE *fd
, unsigned char *ptr
, unsigned int len
)
1446 for (unsigned int i
= 0; i
< len
; i
++)
1447 fprintf (fd
, "%x ", ptr
[i
]);
1451 /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
   bits between adjacent elements.  AMNT should be within
   [0, BITS_PER_UNIT).
   Example, AMNT = 2:
1455 00011111|11100000 << 2 = 01111111|10000000
1456 PTR[1] | PTR[0] PTR[1] | PTR[0]. */
1459 shift_bytes_in_array (unsigned char *ptr
, unsigned int sz
, unsigned int amnt
)
1464 unsigned char carry_over
= 0U;
1465 unsigned char carry_mask
= (~0U) << (unsigned char) (BITS_PER_UNIT
- amnt
);
1466 unsigned char clear_mask
= (~0U) << amnt
;
1468 for (unsigned int i
= 0; i
< sz
; i
++)
1470 unsigned prev_carry_over
= carry_over
;
1471 carry_over
= (ptr
[i
] & carry_mask
) >> (BITS_PER_UNIT
- amnt
);
1476 ptr
[i
] &= clear_mask
;
1477 ptr
[i
] |= prev_carry_over
;
1482 /* Like shift_bytes_in_array but for big-endian.
1483 Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
   bits between adjacent elements.  AMNT should be within
   [0, BITS_PER_UNIT).
   Example, AMNT = 2:
1487 00011111|11100000 >> 2 = 00000111|11111000
1488 PTR[0] | PTR[1] PTR[0] | PTR[1]. */
1491 shift_bytes_in_array_right (unsigned char *ptr
, unsigned int sz
,
1497 unsigned char carry_over
= 0U;
1498 unsigned char carry_mask
= ~(~0U << amnt
);
1500 for (unsigned int i
= 0; i
< sz
; i
++)
1502 unsigned prev_carry_over
= carry_over
;
1503 carry_over
= ptr
[i
] & carry_mask
;
1505 carry_over
<<= (unsigned char) BITS_PER_UNIT
- amnt
;
1507 ptr
[i
] |= prev_carry_over
;
1511 /* Clear out LEN bits starting from bit START in the byte array
1512 PTR. This clears the bits to the *right* from START.
1513 START must be within [0, BITS_PER_UNIT) and counts starting from
1514 the least significant bit. */
1517 clear_bit_region_be (unsigned char *ptr
, unsigned int start
,
1522 /* Clear len bits to the right of start. */
1523 else if (len
<= start
+ 1)
1525 unsigned char mask
= (~(~0U << len
));
1526 mask
= mask
<< (start
+ 1U - len
);
1529 else if (start
!= BITS_PER_UNIT
- 1)
1531 clear_bit_region_be (ptr
, start
, (start
% BITS_PER_UNIT
) + 1);
1532 clear_bit_region_be (ptr
+ 1, BITS_PER_UNIT
- 1,
1533 len
- (start
% BITS_PER_UNIT
) - 1);
1535 else if (start
== BITS_PER_UNIT
- 1
1536 && len
> BITS_PER_UNIT
)
1538 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
1539 memset (ptr
, 0, nbytes
);
1540 if (len
% BITS_PER_UNIT
!= 0)
1541 clear_bit_region_be (ptr
+ nbytes
, BITS_PER_UNIT
- 1,
1542 len
% BITS_PER_UNIT
);
1548 /* In the byte array PTR clear the bit region starting at bit
   START that is LEN bits wide.
1550 For regions spanning multiple bytes do this recursively until we reach
1551 zero LEN or a region contained within a single byte. */
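/* A small worked example (illustrative only), with bit 0 being the least
   significant bit of ptr[0]: clear_bit_region (ptr, 6, 4) clears bits 6 and 7
   of ptr[0] and bits 0 and 1 of ptr[1].  */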
1554 clear_bit_region (unsigned char *ptr
, unsigned int start
,
1557 /* Degenerate base case. */
1560 else if (start
>= BITS_PER_UNIT
)
1561 clear_bit_region (ptr
+ 1, start
- BITS_PER_UNIT
, len
);
1562 /* Second base case. */
1563 else if ((start
+ len
) <= BITS_PER_UNIT
)
1565 unsigned char mask
= (~0U) << (unsigned char) (BITS_PER_UNIT
- len
);
1566 mask
>>= BITS_PER_UNIT
- (start
+ len
);
1572 /* Clear most significant bits in a byte and proceed with the next byte. */
1573 else if (start
!= 0)
1575 clear_bit_region (ptr
, start
, BITS_PER_UNIT
- start
);
1576 clear_bit_region (ptr
+ 1, 0, len
- (BITS_PER_UNIT
- start
));
1578 /* Whole bytes need to be cleared. */
1579 else if (start
== 0 && len
> BITS_PER_UNIT
)
1581 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
      /* We could recurse on each byte but we clear whole bytes, so a simple
	 memset will do.  */
1584 memset (ptr
, '\0', nbytes
);
1585 /* Clear the remaining sub-byte region if there is one. */
1586 if (len
% BITS_PER_UNIT
!= 0)
1587 clear_bit_region (ptr
+ nbytes
, 0, len
% BITS_PER_UNIT
);
1593 /* Write BITLEN bits of EXPR to the byte array PTR at
1594 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1595 Return true if the operation succeeded. */
1598 encode_tree_to_bitpos (tree expr
, unsigned char *ptr
, int bitlen
, int bitpos
,
1599 unsigned int total_bytes
)
1601 unsigned int first_byte
= bitpos
/ BITS_PER_UNIT
;
1602 tree tmp_int
= expr
;
1603 bool sub_byte_op_p
= ((bitlen
% BITS_PER_UNIT
)
1604 || (bitpos
% BITS_PER_UNIT
)
1605 || !int_mode_for_size (bitlen
, 0).exists ());
1608 return native_encode_expr (tmp_int
, ptr
+ first_byte
, total_bytes
) != 0;
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
1613 |--------|--------|--------| ptr + first_byte
1615 xxx xxxxxxxx xxx< bp>
1618 First native_encode_expr EXPR into a temporary buffer and shift each
1619 byte in the buffer by 'bp' (carrying the bits over as necessary).
1620 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1621 <------bitlen---->< bp>
1622 Then we clear the destination bits:
1623 |---00000|00000000|000-----| ptr + first_byte
1624 <-------bitlen--->< bp>
1626 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1627 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
1632 ptr + first_byte |--------|--------|--------|
1634 <bp >xxx xxxxxxxx xxx
1637 First native_encode_expr EXPR into a temporary buffer and shift each
1638 byte in the buffer to the right by (carrying the bits over as necessary).
1639 We shift by as much as needed to align the most significant bit of EXPR
1641 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1642 <---bitlen----> <bp ><-----bitlen----->
1643 Then we clear the destination bits:
1644 ptr + first_byte |-----000||00000000||00000---|
1645 <bp ><-------bitlen----->
1647 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1648 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1649 The awkwardness comes from the fact that bitpos is counted from the
1650 most significant bit of a byte. */
1652 /* We must be dealing with fixed-size data at this point, since the
1653 total size is also fixed. */
1654 fixed_size_mode mode
= as_a
<fixed_size_mode
> (TYPE_MODE (TREE_TYPE (expr
)));
1655 /* Allocate an extra byte so that we have space to shift into. */
1656 unsigned int byte_size
= GET_MODE_SIZE (mode
) + 1;
1657 unsigned char *tmpbuf
= XALLOCAVEC (unsigned char, byte_size
);
1658 memset (tmpbuf
, '\0', byte_size
);
1659 /* The store detection code should only have allowed constants that are
1660 accepted by native_encode_expr. */
1661 if (native_encode_expr (expr
, tmpbuf
, byte_size
- 1) == 0)
1664 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1665 bytes to write. This means it can write more than
1666 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1667 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1668 bitlen and zero out the bits that are not relevant as well (that may
1669 contain a sign bit due to sign-extension). */
1670 unsigned int padding
1671 = byte_size
- ROUND_UP (bitlen
, BITS_PER_UNIT
) / BITS_PER_UNIT
- 1;
  /* On big-endian the padding is at the 'front' so just skip the initial
     bytes.  */
1674 if (BYTES_BIG_ENDIAN
)
1677 byte_size
-= padding
;
1679 if (bitlen
% BITS_PER_UNIT
!= 0)
1681 if (BYTES_BIG_ENDIAN
)
1682 clear_bit_region_be (tmpbuf
, BITS_PER_UNIT
- 1,
1683 BITS_PER_UNIT
- (bitlen
% BITS_PER_UNIT
));
1685 clear_bit_region (tmpbuf
, bitlen
,
1686 byte_size
* BITS_PER_UNIT
- bitlen
);
1688 /* Left shifting relies on the last byte being clear if bitlen is
1689 a multiple of BITS_PER_UNIT, which might not be clear if
1690 there are padding bytes. */
1691 else if (!BYTES_BIG_ENDIAN
)
1692 tmpbuf
[byte_size
- 1] = '\0';
  /* Clear the bit region in PTR where the bits from TMPBUF will be
     inserted.  */
1696 if (BYTES_BIG_ENDIAN
)
1697 clear_bit_region_be (ptr
+ first_byte
,
1698 BITS_PER_UNIT
- 1 - (bitpos
% BITS_PER_UNIT
), bitlen
);
1700 clear_bit_region (ptr
+ first_byte
, bitpos
% BITS_PER_UNIT
, bitlen
);
1703 int bitlen_mod
= bitlen
% BITS_PER_UNIT
;
1704 int bitpos_mod
= bitpos
% BITS_PER_UNIT
;
1706 bool skip_byte
= false;
1707 if (BYTES_BIG_ENDIAN
)
	  /* BITPOS and BITLEN are exactly aligned and no shifting
	     is necessary.  */
1711 if (bitpos_mod
+ bitlen_mod
== BITS_PER_UNIT
1712 || (bitpos_mod
== 0 && bitlen_mod
== 0))
1714 /* |. . . . . . . .|
1716 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1717 of the value until it aligns with 'bp' in the next byte over. */
1718 else if (bitpos_mod
+ bitlen_mod
< BITS_PER_UNIT
)
1720 shift_amnt
= bitlen_mod
+ bitpos_mod
;
1721 skip_byte
= bitlen_mod
!= 0;
1723 /* |. . . . . . . .|
1726 Shift the value right within the same byte so it aligns with 'bp'. */
1728 shift_amnt
= bitlen_mod
+ bitpos_mod
- BITS_PER_UNIT
;
1731 shift_amnt
= bitpos
% BITS_PER_UNIT
;
1733 /* Create the shifted version of EXPR. */
1734 if (!BYTES_BIG_ENDIAN
)
1736 shift_bytes_in_array (tmpbuf
, byte_size
, shift_amnt
);
1737 if (shift_amnt
== 0)
1742 gcc_assert (BYTES_BIG_ENDIAN
);
1743 shift_bytes_in_array_right (tmpbuf
, byte_size
, shift_amnt
);
      /* If shifting right forced us to move into the next byte skip the now
	 empty byte.  */
1753 /* Insert the bits from TMPBUF. */
1754 for (unsigned int i
= 0; i
< byte_size
; i
++)
1755 ptr
[first_byte
+ i
] |= tmpbuf
[i
];
1760 /* Sorting function for store_immediate_info objects.
1761 Sorts them by bitposition. */
1764 sort_by_bitpos (const void *x
, const void *y
)
1766 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1767 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1769 if ((*tmp
)->bitpos
< (*tmp2
)->bitpos
)
1771 else if ((*tmp
)->bitpos
> (*tmp2
)->bitpos
)
  /* If they are the same let's use the order which is guaranteed to
     be different.  */
1776 return (*tmp
)->order
- (*tmp2
)->order
;
1779 /* Sorting function for store_immediate_info objects.
1780 Sorts them by the order field. */
1783 sort_by_order (const void *x
, const void *y
)
1785 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1786 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1788 if ((*tmp
)->order
< (*tmp2
)->order
)
1790 else if ((*tmp
)->order
> (*tmp2
)->order
)
/* Initialize a merged_store_group object from a store_immediate_info
   object.  */
1799 merged_store_group::merged_store_group (store_immediate_info
*info
)
1801 start
= info
->bitpos
;
1802 width
= info
->bitsize
;
1803 bitregion_start
= info
->bitregion_start
;
1804 bitregion_end
= info
->bitregion_end
;
1805 /* VAL has memory allocated for it in apply_stores once the group
1806 width has been finalized. */
1809 unsigned HOST_WIDE_INT align_bitpos
= 0;
1810 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1811 &align
, &align_bitpos
);
1812 align_base
= start
- align_bitpos
;
1813 for (int i
= 0; i
< 2; ++i
)
1815 store_operand_info
&op
= info
->ops
[i
];
1816 if (op
.base_addr
== NULL_TREE
)
1819 load_align_base
[i
] = 0;
1823 get_object_alignment_1 (op
.val
, &load_align
[i
], &align_bitpos
);
1824 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1828 stores
.safe_push (info
);
1829 last_stmt
= info
->stmt
;
1830 last_order
= info
->order
;
1831 first_stmt
= last_stmt
;
1832 first_order
= last_order
;
1836 merged_store_group::~merged_store_group ()
/* Helper method for merge_into and merge_overlapping to do
   the common part.  */
1845 merged_store_group::do_merge (store_immediate_info
*info
)
1847 bitregion_start
= MIN (bitregion_start
, info
->bitregion_start
);
1848 bitregion_end
= MAX (bitregion_end
, info
->bitregion_end
);
1850 unsigned int this_align
;
1851 unsigned HOST_WIDE_INT align_bitpos
= 0;
1852 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1853 &this_align
, &align_bitpos
);
1854 if (this_align
> align
)
1857 align_base
= info
->bitpos
- align_bitpos
;
1859 for (int i
= 0; i
< 2; ++i
)
1861 store_operand_info
&op
= info
->ops
[i
];
1865 get_object_alignment_1 (op
.val
, &this_align
, &align_bitpos
);
1866 if (this_align
> load_align
[i
])
1868 load_align
[i
] = this_align
;
1869 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1873 gimple
*stmt
= info
->stmt
;
1874 stores
.safe_push (info
);
1875 if (info
->order
> last_order
)
1877 last_order
= info
->order
;
1880 else if (info
->order
< first_order
)
1882 first_order
= info
->order
;
1887 /* Merge a store recorded by INFO into this merged store.
   The store is not overlapping with the existing recorded
   stores.  */
1892 merged_store_group::merge_into (store_immediate_info
*info
)
1894 unsigned HOST_WIDE_INT wid
= info
->bitsize
;
1895 /* Make sure we're inserting in the position we think we're inserting. */
1896 gcc_assert (info
->bitpos
>= start
+ width
1897 && info
->bitregion_start
<= bitregion_end
);
1903 /* Merge a store described by INFO into this merged store.
1904 INFO overlaps in some way with the current store (i.e. it's not contiguous
1905 which is handled by merged_store_group::merge_into). */
1908 merged_store_group::merge_overlapping (store_immediate_info
*info
)
1910 /* If the store extends the size of the group, extend the width. */
1911 if (info
->bitpos
+ info
->bitsize
> start
+ width
)
1912 width
+= info
->bitpos
+ info
->bitsize
- (start
+ width
);
1917 /* Go through all the recorded stores in this group in program order and
1918 apply their values to the VAL byte array to create the final merged
1919 value. Return true if the operation succeeded. */
1922 merged_store_group::apply_stores ()
  /* Make sure we have more than one store in the group, otherwise we cannot
     merge anything.  */
1926 if (bitregion_start
% BITS_PER_UNIT
!= 0
1927 || bitregion_end
% BITS_PER_UNIT
!= 0
1928 || stores
.length () == 1)
1931 stores
.qsort (sort_by_order
);
1932 store_immediate_info
*info
;
1934 /* Create a buffer of a size that is 2 times the number of bytes we're
1935 storing. That way native_encode_expr can write power-of-2-sized
1936 chunks without overrunning. */
1937 buf_size
= 2 * ((bitregion_end
- bitregion_start
) / BITS_PER_UNIT
);
1938 val
= XNEWVEC (unsigned char, 2 * buf_size
);
1939 mask
= val
+ buf_size
;
1940 memset (val
, 0, buf_size
);
1941 memset (mask
, ~0U, buf_size
);
1943 FOR_EACH_VEC_ELT (stores
, i
, info
)
1945 unsigned int pos_in_buffer
= info
->bitpos
- bitregion_start
;
1946 tree cst
= NULL_TREE
;
1947 if (info
->ops
[0].val
&& info
->ops
[0].base_addr
== NULL_TREE
)
1948 cst
= info
->ops
[0].val
;
1949 else if (info
->ops
[1].val
&& info
->ops
[1].base_addr
== NULL_TREE
)
1950 cst
= info
->ops
[1].val
;
1953 ret
= encode_tree_to_bitpos (cst
, val
, info
->bitsize
,
1954 pos_in_buffer
, buf_size
);
1955 if (cst
&& dump_file
&& (dump_flags
& TDF_DETAILS
))
1959 fprintf (dump_file
, "After writing ");
1960 print_generic_expr (dump_file
, cst
, 0);
1961 fprintf (dump_file
, " of size " HOST_WIDE_INT_PRINT_DEC
1962 " at position %d the merged region contains:\n",
1963 info
->bitsize
, pos_in_buffer
);
1964 dump_char_array (dump_file
, val
, buf_size
);
1967 fprintf (dump_file
, "Failed to merge stores\n");
1971 unsigned char *m
= mask
+ (pos_in_buffer
/ BITS_PER_UNIT
);
1972 if (BYTES_BIG_ENDIAN
)
1973 clear_bit_region_be (m
, (BITS_PER_UNIT
- 1
1974 - (pos_in_buffer
% BITS_PER_UNIT
)),
1977 clear_bit_region (m
, pos_in_buffer
% BITS_PER_UNIT
, info
->bitsize
);
1979 stores
.qsort (sort_by_bitpos
);
1983 /* Structure describing the store chain. */
1985 struct imm_store_chain_info
1987 /* Doubly-linked list that imposes an order on chain processing.
1988 PNXP (prev's next pointer) points to the head of a list, or to
1989 the next field in the previous chain in the list.
1990 See pass_store_merging::m_stores_head for more rationale. */
1991 imm_store_chain_info
*next
, **pnxp
;
1993 auto_vec
<store_immediate_info
*> m_store_info
;
1994 auto_vec
<merged_store_group
*> m_merged_store_groups
;
1996 imm_store_chain_info (imm_store_chain_info
*&inspt
, tree b_a
)
1997 : next (inspt
), pnxp (&inspt
), base_addr (b_a
)
2002 gcc_checking_assert (pnxp
== next
->pnxp
);
2006 ~imm_store_chain_info ()
2011 gcc_checking_assert (&next
== next
->pnxp
);
2015 bool terminate_and_process_chain ();
2016 bool try_coalesce_bswap (merged_store_group
*, unsigned int, unsigned int);
2017 bool coalesce_immediate_stores ();
2018 bool output_merged_store (merged_store_group
*);
2019 bool output_merged_stores ();
2022 const pass_data pass_data_tree_store_merging
= {
2023 GIMPLE_PASS
, /* type */
2024 "store-merging", /* name */
2025 OPTGROUP_NONE
, /* optinfo_flags */
2026 TV_GIMPLE_STORE_MERGING
, /* tv_id */
2027 PROP_ssa
, /* properties_required */
2028 0, /* properties_provided */
2029 0, /* properties_destroyed */
2030 0, /* todo_flags_start */
2031 TODO_update_ssa
, /* todo_flags_finish */
class pass_store_merging : public gimple_opt_pass
{
public:
  pass_store_merging (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
  {
  }

  /* Pass not supported for PDP-endianness, nor for insane hosts
     or target character sizes where native_{encode,interpret}_expr
     doesn't work properly.  */
  virtual bool
  gate (function *)
  {
    return flag_store_merging
	   && WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN
	   && BITS_PER_UNIT == 8;
  }

  virtual unsigned int execute (function *);

private:
  hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;

  /* Form a doubly-linked stack of the elements of m_stores, so that
     we can iterate over them in a predictable way.  Using this order
     avoids extraneous differences in the compiler output just because
     of tree pointer variations (e.g. different chains end up in
     different positions of m_stores, so they are handled in different
     orders, so they allocate or release SSA names in different
     orders, and when they get reused, subsequent passes end up
     getting different SSA names, which may ultimately change
     decisions when going out of SSA).  */
  imm_store_chain_info *m_stores_head;

  void process_store (gimple *);
  bool terminate_and_process_all_chains ();
  bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
  bool terminate_and_release_chain (imm_store_chain_info *);
}; // class pass_store_merging
/* Terminate and process all recorded chains.  Return true if any changes
   were made.  */

bool
pass_store_merging::terminate_and_process_all_chains ()
{
  bool ret = false;
  while (m_stores_head)
    ret |= terminate_and_release_chain (m_stores_head);
  gcc_assert (m_stores.elements () == 0);
  gcc_assert (m_stores_head == NULL);

  return ret;
}
/* Terminate all chains that are affected by the statement STMT.
   CHAIN_INFO is the chain we should ignore from the checks if
   non-NULL.  */

bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
						     **chain_info,
						   gimple *stmt)
{
  bool ret = false;

  /* If the statement doesn't touch memory it can't alias.  */
  if (!gimple_vuse (stmt))
    return false;

  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
  for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
    {
      next = cur->next;

      /* We already checked all the stores in chain_info and terminated the
	 chain if necessary.  Skip it here.  */
      if (chain_info && *chain_info == cur)
	continue;

      store_immediate_info *info;
      unsigned int i;
      FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (ref_maybe_used_by_stmt_p (stmt, lhs)
	      || stmt_may_clobber_ref_p (stmt, lhs)
	      || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "stmt causes chain termination:\n");
		  print_gimple_stmt (dump_file, stmt, 0);
		}
	      terminate_and_release_chain (cur);
	      ret = true;
	      break;
	    }
	}
    }

  return ret;
}
/* Helper function.  Terminate the recorded chain storing to base object
   BASE.  Return true if the merging and output was successful.  The m_stores
   entry is removed after the processing in any case.  */

bool
pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info)
{
  bool ret = chain_info->terminate_and_process_chain ();
  m_stores.remove (chain_info->base_addr);
  delete chain_info;
  return ret;
}
/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
   may clobber REF.  FIRST and LAST must be in the same basic block and
   have non-NULL vdef.  We want to be able to sink load of REF across
   stores between FIRST and LAST, up to right before LAST.  */

static bool
stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
{
  ao_ref r;
  ao_ref_init (&r, ref);
  unsigned int count = 0;
  tree vop = gimple_vdef (last);
  gimple *stmt;

  gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
  do
    {
      stmt = SSA_NAME_DEF_STMT (vop);
      if (stmt_may_clobber_ref_p_1 (stmt, &r))
	return true;
      if (gimple_store_p (stmt)
	  && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
	return true;
      /* Avoid quadratic compile time by bounding the number of checks
	 we perform.  */
      if (++count > MAX_STORE_ALIAS_CHECKS)
	return true;
      vop = gimple_vuse (stmt);
    }
  while (stmt != first);
  return false;
}
/* Return true if INFO->ops[IDX] is mergeable with the
   corresponding loads already in MERGED_STORE group.
   BASE_ADDR is the base address of the whole store group.  */

static bool
compatible_load_p (merged_store_group *merged_store,
		   store_immediate_info *info,
		   tree base_addr, int idx)
{
  store_immediate_info *infof = merged_store->stores[0];
  if (!info->ops[idx].base_addr
      || (info->ops[idx].bitpos - infof->ops[idx].bitpos
	  != info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
			   infof->ops[idx].base_addr, 0))
    return false;

  store_immediate_info *infol = merged_store->stores.last ();
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
  /* In this case all vuses should be the same, e.g.
     _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
     or
     _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
     and we can emit the coalesced load next to any of those loads.  */
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
    return true;

  /* Otherwise, at least for now require that the load has the same
     vuse as the store.  See following examples.  */
  if (gimple_vuse (info->stmt) != load_vuse)
    return false;

  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
	  && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
    return false;

  /* If the load is from the same location as the store, already
     the construction of the immediate chain info guarantees no intervening
     stores, so no further checks are needed.  Example:
     _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4;  */
  if (info->ops[idx].bitpos == info->bitpos
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
    return true;

  /* Otherwise, we need to punt if any of the loads can be clobbered by any
     of the stores in the group, or any other stores in between those.
     Previous calls to compatible_load_p ensured that for all the
     merged_store->stores IDX loads, no stmts starting with
     merged_store->first_stmt and ending right before merged_store->last_stmt
     clobbers those loads.  */
  gimple *first = merged_store->first_stmt;
  gimple *last = merged_store->last_stmt;
  unsigned int i;
  store_immediate_info *infoc;
  /* The stores are sorted by increasing store bitpos, so if info->stmt store
     comes before the so far first load, we'll be changing
     merged_store->first_stmt.  In that case we need to give up if
     any of the earlier processed loads clobber with the stmts in the new
     range.  */
  if (info->order < merged_store->first_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
	  return false;
      first = info->stmt;
    }
  /* Similarly, we could change merged_store->last_stmt, so ensure
     in that case no stmts in the new range clobber any of the earlier
     processed loads.  */
  else if (info->order > merged_store->last_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
	  return false;
      last = info->stmt;
    }
  /* And finally, we'd be adding a new load to the set, ensure it isn't
     clobbered in the new range.  */
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
    return false;

  /* Otherwise, we are looking for:
     _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
     or
     _1 = s.a; t.a = _1; _2 = s.b; t.b = _2;  */
  return true;
}
/* Add all refs loaded to compute VAL to REFS vector.  */

static void
gather_bswap_load_refs (vec<tree> *refs, tree val)
{
  if (TREE_CODE (val) != SSA_NAME)
    return;

  gimple *stmt = SSA_NAME_DEF_STMT (val);
  if (!is_gimple_assign (stmt))
    return;

  if (gimple_assign_load_p (stmt))
    {
      refs->safe_push (gimple_assign_rhs1 (stmt));
      return;
    }

  switch (gimple_assign_rhs_class (stmt))
    {
    case GIMPLE_BINARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
      /* FALLTHRU */
    case GIMPLE_UNARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
      break;
    default:
      gcc_unreachable ();
    }
}
/* Return true if m_store_info[first] and at least one following store
   form a group which store try_size bitsize value which is byte swapped
   from a memory load or some value, or identity from some value.
   This uses the bswap pass APIs.  */

bool
imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
					  unsigned int first,
					  unsigned int try_size)
{
  unsigned int len = m_store_info.length (), last = first;
  unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
  if (width >= try_size)
    return false;
  for (unsigned int i = first + 1; i < len; ++i)
    {
      if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
	  || m_store_info[i]->ins_stmt == NULL)
	return false;
      width += m_store_info[i]->bitsize;
      if (width >= try_size)
	{
	  last = i;
	  break;
	}
    }
  if (width != try_size)
    return false;

  bool allow_unaligned
    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
  /* Punt if the combined store would not be aligned and we need alignment.  */
  if (!allow_unaligned)
    {
      unsigned int align = merged_store->align;
      unsigned HOST_WIDE_INT align_base = merged_store->align_base;
      for (unsigned int i = first + 1; i <= last; ++i)
	{
	  unsigned int this_align;
	  unsigned HOST_WIDE_INT align_bitpos = 0;
	  get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
				  &this_align, &align_bitpos);
	  if (this_align > align)
	    {
	      align = this_align;
	      align_base = m_store_info[i]->bitpos - align_bitpos;
	    }
	}
      unsigned HOST_WIDE_INT align_bitpos
	= (m_store_info[first]->bitpos - align_base) & (align - 1);
      if (align_bitpos)
	align = least_bit_hwi (align_bitpos);
      if (align < try_size)
	return false;
    }

  tree type;
  switch (try_size)
    {
    case 16: type = uint16_type_node; break;
    case 32: type = uint32_type_node; break;
    case 64: type = uint64_type_node; break;
    default: gcc_unreachable ();
    }
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  int vuse_store = -1;
  unsigned int first_order = merged_store->first_order;
  unsigned int last_order = merged_store->last_order;
  gimple *first_stmt = merged_store->first_stmt;
  gimple *last_stmt = merged_store->last_stmt;
  store_immediate_info *infof = m_store_info[first];

  for (unsigned int i = first; i <= last; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      struct symbolic_number this_n = info->n;
      this_n.type = type;
      if (!this_n.base_addr)
	this_n.range = try_size / BITS_PER_UNIT;
      unsigned int bitpos = info->bitpos - infof->bitpos;
      if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
			    BYTES_BIG_ENDIAN
			    ? try_size - info->bitsize - bitpos
			    : bitpos))
	return false;
      if (this_n.base_addr && vuse_store)
	{
	  unsigned int j;
	  for (j = first; j <= last; ++j)
	    if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
	      break;
	  if (j > last)
	    {
	      if (vuse_store == 1)
		return false;
	      vuse_store = 0;
	    }
	}
      if (i == first)
	{
	  n = this_n;
	  ins_stmt = info->ins_stmt;
	}
      else
	{
	  if (n.vuse != this_n.vuse)
	    {
	      if (vuse_store == 0)
		return false;
	      vuse_store = 1;
	    }
	  if (info->order > last_order)
	    {
	      last_order = info->order;
	      last_stmt = info->stmt;
	    }
	  else if (info->order < first_order)
	    {
	      first_order = info->order;
	      first_stmt = info->stmt;
	    }

	  ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
					     &this_n, &n);
	  if (ins_stmt == NULL)
	    return false;
	}
    }

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number to start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  if (n.n != cmpnop && n.n != cmpxchg)
    return false;

  if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
    return false;

  /* Don't handle memory copy this way if normal non-bswap processing
     would handle it too.  */
  if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
    {
      unsigned int i;
      for (i = first; i <= last; ++i)
	if (m_store_info[i]->rhs_code != MEM_REF)
	  break;
      if (i == last + 1)
	return false;
    }

  switch (try_size)
    {
    case 16:
      /* Will emit LROTATE_EXPR.  */
      break;
    case 32:
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	  && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
	break;
      return false;
    case 64:
      if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	  && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
	break;
      return false;
    default:
      gcc_unreachable ();
    }

  if (!allow_unaligned && n.base_addr)
    {
      unsigned int align = get_object_alignment (n.src);
      if (align < try_size)
	return false;
    }

  /* If each load has vuse of the corresponding store, need to verify
     the loads can be sunk right before the last store.  */
  if (vuse_store == 1)
    {
      auto_vec<tree, 64> refs;
      for (unsigned int i = first; i <= last; ++i)
	gather_bswap_load_refs (&refs,
				gimple_assign_rhs1 (m_store_info[i]->stmt));

      unsigned int i;
      tree ref;
      FOR_EACH_VEC_ELT (refs, i, ref)
	if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
	  return false;
      n.vuse = NULL_TREE;
    }

  infof->n = n;
  infof->ins_stmt = ins_stmt;
  for (unsigned int i = first; i <= last; ++i)
    {
      m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
      m_store_info[i]->ops[0].base_addr = NULL_TREE;
      m_store_info[i]->ops[1].base_addr = NULL_TREE;
      if (i != first)
	merged_store->merge_into (m_store_info[i]);
    }

  return true;
}
/* Go through the candidate stores recorded in m_store_info and merge them
   into merged_store_group objects recorded into m_merged_store_groups
   representing the widened stores.  Return true if coalescing was successful
   and the number of widened stores is fewer than the original number
   of statements.  */

bool
imm_store_chain_info::coalesce_immediate_stores ()
{
  /* Anything less can't be processed.  */
  if (m_store_info.length () < 2)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Attempting to coalesce %u stores in chain.\n",
	     m_store_info.length ());

  store_immediate_info *info;
  unsigned int i, ignore = 0;

  /* Order the stores by the bitposition they write to.  */
  m_store_info.qsort (sort_by_bitpos);

  info = m_store_info[0];
  merged_store_group *merged_store = new merged_store_group (info);

  FOR_EACH_VEC_ELT (m_store_info, i, info)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
		   " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:\n",
		   i, info->bitsize, info->bitpos);
	  print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
	  fprintf (dump_file, "\n------------\n");
	}

      if (i <= ignore)
	continue;

      /* First try to handle group of stores like:
	 p[0] = data >> 24; p[1] = data >> 16;
	 p[2] = data >> 8; p[3] = data;
	 using the bswap framework.  */
      if (info->bitpos == merged_store->start + merged_store->width
	  && merged_store->stores.length () == 1
	  && merged_store->stores[0]->ins_stmt != NULL
	  && info->ins_stmt != NULL)
	{
	  unsigned int try_size;
	  for (try_size = 64; try_size >= 16; try_size >>= 1)
	    if (try_coalesce_bswap (merged_store, i - 1, try_size))
	      break;

	  if (try_size >= 16)
	    {
	      ignore = i + merged_store->stores.length () - 1;
	      m_merged_store_groups.safe_push (merged_store);
	      if (ignore < m_store_info.length ())
		merged_store = new merged_store_group (m_store_info[ignore]);
	      else
		merged_store = NULL;
	      continue;
	    }
	}

      /* Overlapping stores.  */
      if (IN_RANGE (info->bitpos, merged_store->start,
		    merged_store->start + merged_store->width - 1))
	{
	  /* Only allow overlapping stores of constants.  */
	  if (info->rhs_code == INTEGER_CST
	      && merged_store->stores[0]->rhs_code == INTEGER_CST)
	    {
	      merged_store->merge_overlapping (info);
	      continue;
	    }
	}
      /* |---store 1---||---store 2---|
	 This store is consecutive to the previous one.
	 Merge it into the current store group.  There can be gaps in between
	 the stores, but there can't be gaps in between bitregions.  */
      else if (info->rhs_code != LROTATE_EXPR
	       && info->bitregion_start <= merged_store->bitregion_end
	       && info->rhs_code == merged_store->stores[0]->rhs_code)
	{
	  store_immediate_info *infof = merged_store->stores[0];

	  /* All the rhs_code ops that take 2 operands are commutative,
	     swap the operands if it could make the operands compatible.  */
	  if (infof->ops[0].base_addr
	      && infof->ops[1].base_addr
	      && info->ops[0].base_addr
	      && info->ops[1].base_addr
	      && (info->ops[1].bitpos - infof->ops[0].bitpos
		  == info->bitpos - infof->bitpos)
	      && operand_equal_p (info->ops[1].base_addr,
				  infof->ops[0].base_addr, 0))
	    {
	      std::swap (info->ops[0], info->ops[1]);
	      info->ops_swapped_p = true;
	    }
	  if ((infof->ops[0].base_addr
	       ? compatible_load_p (merged_store, info, base_addr, 0)
	       : !info->ops[0].base_addr)
	      && (infof->ops[1].base_addr
		  ? compatible_load_p (merged_store, info, base_addr, 1)
		  : !info->ops[1].base_addr))
	    {
	      merged_store->merge_into (info);
	      continue;
	    }
	}

      /* |---store 1---| <gap> |---store 2---|.
	 Gap between stores or the rhs not compatible.  Start a new group.  */

      /* Try to apply all the stores recorded for the group to determine
	 the bitpattern they write and discard it if that fails.
	 This will also reject single-store groups.  */
      if (!merged_store->apply_stores ())
	delete merged_store;
      else
	m_merged_store_groups.safe_push (merged_store);

      merged_store = new merged_store_group (info);
    }

  /* Record or discard the last store group.  */
  if (merged_store)
    {
      if (!merged_store->apply_stores ())
	delete merged_store;
      else
	m_merged_store_groups.safe_push (merged_store);
    }

  gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
  bool success
    = !m_merged_store_groups.is_empty ()
      && m_merged_store_groups.length () < m_store_info.length ();

  if (success && dump_file)
    fprintf (dump_file, "Coalescing successful!\n"
	     "Merged into %u stores\n",
	     m_merged_store_groups.length ());

  return success;
}
/* Return the type to use for the merged stores or loads described by STMTS.
   This is needed to get the alias sets right.  If IS_LOAD, look for rhs,
   otherwise lhs.  Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
   of the MEM_REFs if any.  */

static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
			  unsigned short *cliquep, unsigned short *basep)
{
  gimple *stmt;
  unsigned int i;
  tree type = NULL_TREE;
  tree ret = NULL_TREE;
  *cliquep = 0;
  *basep = 0;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      tree ref = is_load ? gimple_assign_rhs1 (stmt)
			 : gimple_assign_lhs (stmt);
      tree type1 = reference_alias_ptr_type (ref);
      tree base = get_base_address (ref);

      if (i == 0)
	{
	  if (TREE_CODE (base) == MEM_REF)
	    {
	      *cliquep = MR_DEPENDENCE_CLIQUE (base);
	      *basep = MR_DEPENDENCE_BASE (base);
	    }
	  ret = type = type1;
	  continue;
	}
      if (!alias_ptr_types_compatible_p (type, type1))
	ret = ptr_type_node;
      if (TREE_CODE (base) != MEM_REF
	  || *cliquep != MR_DEPENDENCE_CLIQUE (base)
	  || *basep != MR_DEPENDENCE_BASE (base))
	{
	  *cliquep = 0;
	  *basep = 0;
	}
    }
  return ret;
}
/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
  gimple *stmt;
  unsigned int i;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    if (gimple_has_location (stmt))
      return gimple_location (stmt);

  return UNKNOWN_LOCATION;
}
/* Used to describe a store resulting from splitting a wide store in smaller
   regularly-sized stores in split_group.  */

struct split_store
{
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
	       unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
			  unsigned HOST_WIDE_INT sz,
			  unsigned HOST_WIDE_INT al)
  : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}
/* Record all stores in GROUP that write to the region starting at BITPOS and
   is of size BITSIZE.  Record infos for such statements in STORES if
   non-NULL.  The stores in GROUP must be sorted by bitposition.  Return INFO
   if there is exactly one original store in the range.  */

static store_immediate_info *
find_constituent_stores (struct merged_store_group *group,
			 vec<store_immediate_info *> *stores,
			 unsigned int *first,
			 unsigned HOST_WIDE_INT bitpos,
			 unsigned HOST_WIDE_INT bitsize)
{
  store_immediate_info *info, *ret = NULL;
  unsigned int i;
  bool second = false;
  bool update_first = true;
  unsigned HOST_WIDE_INT end = bitpos + bitsize;
  for (i = *first; group->stores.iterate (i, &info); ++i)
    {
      unsigned HOST_WIDE_INT stmt_start = info->bitpos;
      unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
      if (stmt_end <= bitpos)
	{
	  /* BITPOS passed to this function never decreases from within the
	     same split_group call, so optimize and don't scan info records
	     which are known to end before or at BITPOS next time.
	     Only do it if all stores before this one also pass this.  */
	  if (update_first)
	    *first = i + 1;
	  continue;
	}
      else
	update_first = false;

      /* The stores in GROUP are ordered by bitposition so if we're past
	 the region for this group return early.  */
      if (stmt_start >= end)
	return ret;

      if (stores)
	{
	  stores->safe_push (info);
	  if (ret)
	    {
	      ret = NULL;
	      second = true;
	    }
	}
      else if (ret)
	return NULL;
      if (!second)
	ret = info;
    }
  return second ? NULL : ret;
}
/* Return how many SSA_NAMEs used to compute value to store in the INFO
   store have multiple uses.  If any SSA_NAME has multiple uses, also
   count statements needed to compute it.  */

static unsigned
count_multiple_uses (store_immediate_info *info)
{
  gimple *stmt = info->stmt;
  unsigned ret = 0;
  switch (info->rhs_code)
    {
    case INTEGER_CST:
      return 0;
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      if (info->bit_not_p)
	{
	  if (!has_single_use (gimple_assign_rhs1 (stmt)))
	    ret = 1; /* Fall through below to return
			the BIT_NOT_EXPR stmt and then
			BIT_{AND,IOR,XOR}_EXPR and anything it
			uses.  */
	  else
	    /* stmt is after this the BIT_NOT_EXPR.  */
	    stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	}
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	{
	  ret += 1 + info->ops[0].bit_not_p;
	  if (info->ops[1].base_addr)
	    ret += 1 + info->ops[1].bit_not_p;
	  return ret + 1;
	}
      stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      /* stmt is now the BIT_*_EXPR.  */
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
      else if (info->ops[info->ops_swapped_p].bit_not_p)
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt2)))
	    ++ret;
	}
      if (info->ops[1].base_addr == NULL_TREE)
	{
	  gcc_checking_assert (!info->ops_swapped_p);
	  return ret;
	}
      if (!has_single_use (gimple_assign_rhs2 (stmt)))
	ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
      else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt2)))
	    ++ret;
	}
      return ret;
    case MEM_REF:
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	return 1 + info->ops[0].bit_not_p;
      else if (info->ops[0].bit_not_p)
	{
	  stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt)))
	    return 1;
	}
      return 0;
    default:
      gcc_unreachable ();
    }
}
/* Split a merged store described by GROUP by populating the SPLIT_STORES
   vector (if non-NULL) with split_store structs describing the byte offset
   (from the base), the bit size and alignment of each store as well as the
   original statements involved in each such split group.
   This is to separate the splitting strategy from the statement
   building/emission/linking done in output_merged_store.
   Return number of new stores.
   If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
   If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
   If SPLIT_STORES is NULL, it is just a dry run to count number of
   new stores.  */

static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
	     bool allow_unaligned_load,
	     vec<struct split_store *> *split_stores,
	     unsigned *total_orig,
	     unsigned *total_new)
{
  unsigned HOST_WIDE_INT pos = group->bitregion_start;
  unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
  unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
  unsigned HOST_WIDE_INT group_align = group->align;
  unsigned HOST_WIDE_INT align_base = group->align_base;
  unsigned HOST_WIDE_INT group_load_align = group_align;
  bool any_orig = false;

  gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));

  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR)
    {
      /* For bswap framework using sets of stores, all the checking
	 has been done earlier in try_coalesce_bswap and needs to be
	 emitted as a single store.  */
      if (total_orig)
	{
	  /* Avoid the old/new stmt count heuristics.  It should be
	     always beneficial.  */
	  total_new[0] = 1;
	  total_orig[0] = 2;
	}

      if (split_stores)
	{
	  unsigned HOST_WIDE_INT align_bitpos
	    = (group->start - align_base) & (group_align - 1);
	  unsigned HOST_WIDE_INT align = group_align;
	  if (align_bitpos)
	    align = least_bit_hwi (align_bitpos);
	  bytepos = group->start / BITS_PER_UNIT;
	  struct split_store *store
	    = new split_store (bytepos, group->width, align);
	  unsigned int first = 0;
	  find_constituent_stores (group, &store->orig_stores,
				   &first, group->start, group->width);
	  split_stores->safe_push (store);
	}

      return 1;
    }

  unsigned int ret = 0, first = 0;
  unsigned HOST_WIDE_INT try_pos = bytepos;

  if (total_orig)
    {
      unsigned int i;
      store_immediate_info *info = group->stores[0];

      total_new[0] = 0;
      total_orig[0] = 1; /* The orig store.  */
      info = group->stores[0];
      if (info->ops[0].base_addr)
	total_orig[0]++;
      if (info->ops[1].base_addr)
	total_orig[0]++;
      switch (info->rhs_code)
	{
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  total_orig[0]++; /* The orig BIT_*_EXPR stmt.  */
	  break;
	default:
	  break;
	}
      total_orig[0] *= group->stores.length ();

      FOR_EACH_VEC_ELT (group->stores, i, info)
	{
	  total_new[0] += count_multiple_uses (info);
	  total_orig[0] += (info->bit_not_p
			    + info->ops[0].bit_not_p
			    + info->ops[1].bit_not_p);
	}
    }

  if (!allow_unaligned_load)
    for (int i = 0; i < 2; ++i)
      if (group->load_align[i])
	group_load_align = MIN (group_load_align, group->load_align[i]);

  while (size > 0)
    {
      if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
	  && group->mask[try_pos - bytepos] == (unsigned char) ~0U)
	{
	  /* Skip padding bytes.  */
	  ++try_pos;
	  size -= BITS_PER_UNIT;
	  continue;
	}

      unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
      unsigned HOST_WIDE_INT align_bitpos
	= (try_bitpos - align_base) & (group_align - 1);
      unsigned HOST_WIDE_INT align = group_align;
      if (align_bitpos)
	align = least_bit_hwi (align_bitpos);
      if (!allow_unaligned_store)
	try_size = MIN (try_size, align);
      if (!allow_unaligned_load)
	{
	  /* If we can't do or don't want to do unaligned stores
	     as well as loads, we need to take the loads into account
	     as well.  */
	  unsigned HOST_WIDE_INT load_align = group_load_align;
	  align_bitpos = (try_bitpos - align_base) & (load_align - 1);
	  if (align_bitpos)
	    load_align = least_bit_hwi (align_bitpos);
	  for (int i = 0; i < 2; ++i)
	    if (group->load_align[i])
	      {
		align_bitpos = try_bitpos - group->stores[0]->bitpos;
		align_bitpos += group->stores[0]->ops[i].bitpos;
		align_bitpos -= group->load_align_base[i];
		align_bitpos &= (group_load_align - 1);
		if (align_bitpos)
		  {
		    unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
		    load_align = MIN (load_align, a);
		  }
	      }
	  try_size = MIN (try_size, load_align);
	}
      store_immediate_info *info
	= find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
      if (info)
	{
	  /* If there is just one original statement for the range, see if
	     we can just reuse the original store which could be even larger
	     than try_size.  */
	  unsigned HOST_WIDE_INT stmt_end
	    = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
	  info = find_constituent_stores (group, NULL, &first, try_bitpos,
					  stmt_end - try_bitpos);
	  if (info && info->bitpos >= try_bitpos)
	    try_size = stmt_end - try_bitpos;
	}

      /* Approximate store bitsize for the case when there are no padding
	 bits.  */
      while (try_size > size)
	try_size /= 2;
      /* Now look for whole padding bytes at the end of that bitsize.  */
      for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
	if (group->mask[try_pos - bytepos + nonmasked - 1]
	    != (unsigned char) ~0U)
	  break;
      if (nonmasked == 0)
	{
	  /* If entire try_size range is padding, skip it.  */
	  try_pos += try_size / BITS_PER_UNIT;
	  size -= try_size;
	  continue;
	}
      /* Otherwise try to decrease try_size if second half, last 3 quarters
	 etc. are padding.  */
      nonmasked *= BITS_PER_UNIT;
      while (nonmasked <= try_size / 2)
	try_size /= 2;
      if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
	{
	  /* Now look for whole padding bytes at the start of that bitsize.  */
	  unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
	  for (masked = 0; masked < try_bytesize; ++masked)
	    if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U)
	      break;
	  masked *= BITS_PER_UNIT;
	  gcc_assert (masked < try_size);
	  if (masked >= try_size / 2)
	    {
	      while (masked >= try_size / 2)
		{
		  try_size /= 2;
		  try_pos += try_size / BITS_PER_UNIT;
		  size -= try_size;
		  masked -= try_size;
		}
	      /* Need to recompute the alignment, so just retry at the new
		 position.  */
	      continue;
	    }
	}

      ++ret;
      if (split_stores)
	{
	  struct split_store *store
	    = new split_store (try_pos, try_size, align);
	  info = find_constituent_stores (group, &store->orig_stores,
					  &first, try_bitpos, try_size);
	  if (info
	      && info->bitpos >= try_bitpos
	      && info->bitpos + info->bitsize <= try_bitpos + try_size)
	    {
	      store->orig = true;
	      any_orig = true;
	    }
	  split_stores->safe_push (store);
	}

      try_pos += try_size / BITS_PER_UNIT;
      size -= try_size;
    }

  if (total_orig)
    {
      unsigned int i;
      struct split_store *store;
      /* If we are reusing some original stores and any of the
	 original SSA_NAMEs had multiple uses, we need to subtract
	 those now before we add the new ones.  */
      if (total_new[0] && any_orig)
	{
	  FOR_EACH_VEC_ELT (*split_stores, i, store)
	    if (store->orig)
	      total_new[0] -= count_multiple_uses (store->orig_stores[0]);
	}
      total_new[0] += ret; /* The new store.  */
      store_immediate_info *info = group->stores[0];
      if (info->ops[0].base_addr)
	total_new[0] += ret;
      if (info->ops[1].base_addr)
	total_new[0] += ret;
      switch (info->rhs_code)
	{
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  total_new[0] += ret; /* The new BIT_*_EXPR stmt.  */
	  break;
	default:
	  break;
	}
      FOR_EACH_VEC_ELT (*split_stores, i, store)
	{
	  unsigned int j;
	  bool bit_not_p[3] = { false, false, false };
	  /* If all orig_stores have certain bit_not_p set, then
	     we'd use a BIT_NOT_EXPR stmt and need to account for it.
	     If some orig_stores have certain bit_not_p set, then
	     we'd use a BIT_XOR_EXPR with a mask and need to account for
	     it as well.  */
	  FOR_EACH_VEC_ELT (store->orig_stores, j, info)
	    {
	      if (info->ops[0].bit_not_p)
		bit_not_p[0] = true;
	      if (info->ops[1].bit_not_p)
		bit_not_p[1] = true;
	      if (info->bit_not_p)
		bit_not_p[2] = true;
	    }
	  total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
	}
    }

  return ret;
}
/* Return the operation through which the operand IDX (if < 2) or
   result (IDX == 2) should be inverted.  If NOP_EXPR, no inversion
   is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
   the bits should be xored with mask.  */

static enum tree_code
invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
{
  unsigned int i;
  store_immediate_info *info;
  unsigned int cnt = 0;
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (bit_not_p)
	++cnt;
    }
  mask = NULL_TREE;
  if (cnt == 0)
    return NOP_EXPR;
  if (cnt == split_store->orig_stores.length ())
    return BIT_NOT_EXPR;

  unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
  unsigned buf_size = split_store->size / BITS_PER_UNIT;
  unsigned char *buf
    = XALLOCAVEC (unsigned char, buf_size);
  memset (buf, ~0U, buf_size);
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (!bit_not_p)
	continue;
      /* Clear regions with bit_not_p and invert afterwards, rather than
	 clear regions with !bit_not_p, so that gaps in between stores aren't
	 set in the mask.  */
      unsigned HOST_WIDE_INT bitsize = info->bitsize;
      unsigned int pos_in_buffer = 0;
      if (info->bitpos < try_bitpos)
	{
	  gcc_assert (info->bitpos + bitsize > try_bitpos);
	  bitsize -= (try_bitpos - info->bitpos);
	}
      else
	pos_in_buffer = info->bitpos - try_bitpos;
      if (pos_in_buffer + bitsize > split_store->size)
	bitsize = split_store->size - pos_in_buffer;
      unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
	clear_bit_region_be (p, (BITS_PER_UNIT - 1
				 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
      else
	clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
    }
  for (unsigned int i = 0; i < buf_size; ++i)
    buf[i] = ~buf[i];
  mask = native_interpret_expr (int_type, buf, buf_size);
  return BIT_XOR_EXPR;
}
/* Given a merged store group GROUP output the widened version of it.
   The store chain is against the base object BASE.
   Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
   unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
   Make sure that the number of statements output is less than the number of
   original statements.  If a better sequence is possible emit it and
   return true.  */

bool
imm_store_chain_info::output_merged_store (merged_store_group *group)
{
  unsigned HOST_WIDE_INT start_byte_pos
    = group->bitregion_start / BITS_PER_UNIT;

  unsigned int orig_num_stmts = group->stores.length ();
  if (orig_num_stmts < 2)
    return false;

  auto_vec<struct split_store *, 32> split_stores;
  split_stores.create (0);
  bool allow_unaligned_store
    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
  bool allow_unaligned_load = allow_unaligned_store;
  if (allow_unaligned_store)
    {
      /* If unaligned stores are allowed, see how many stores we'd emit
	 for unaligned and how many stores we'd emit for aligned stores.
	 Only use unaligned stores if it allows fewer stores than aligned.  */
      unsigned aligned_cnt
	= split_group (group, false, allow_unaligned_load, NULL, NULL, NULL);
      unsigned unaligned_cnt
	= split_group (group, true, allow_unaligned_load, NULL, NULL, NULL);
      if (aligned_cnt <= unaligned_cnt)
	allow_unaligned_store = false;
    }
  unsigned total_orig, total_new;
  split_group (group, allow_unaligned_store, allow_unaligned_load,
	       &split_stores, &total_orig, &total_new);

  if (split_stores.length () >= orig_num_stmts)
    {
      /* We didn't manage to reduce the number of statements.  Bail out.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Exceeded original number of stmts (%u)."
		 "  Not profitable to emit new sequence.\n",
		 orig_num_stmts);
      return false;
    }
  if (total_orig <= total_new)
    {
      /* If number of estimated new statements is above estimated original
	 statements, bail out too.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Estimated number of original stmts (%u)"
		 " not larger than estimated number of new"
		 " stmts (%u).\n",
		 total_orig, total_new);
      return false;
    }

  gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
  gimple_seq seq = NULL;
  tree last_vdef, new_vuse;
  last_vdef = gimple_vdef (group->last_stmt);
  new_vuse = gimple_vuse (group->last_stmt);
  tree bswap_res = NULL_TREE;

  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR)
    {
      tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
      gimple *ins_stmt = group->stores[0]->ins_stmt;
      struct symbolic_number *n = &group->stores[0]->n;
      bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;

      switch (n->range)
	{
	case 16:
	  load_type = bswap_type = uint16_type_node;
	  break;
	case 32:
	  load_type = uint32_type_node;
	  if (bswap)
	    {
	      fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
	      bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	    }
	  break;
	case 64:
	  load_type = uint64_type_node;
	  if (bswap)
	    {
	      fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
	      bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	    }
	  break;
	default:
	  gcc_unreachable ();
	}

      /* If the loads have each vuse of the corresponding store,
	 we've checked the aliasing already in try_coalesce_bswap and
	 we want to sink the need load into seq.  So need to use new_vuse
	 for the load.  */
      if (n->base_addr && n->vuse == NULL)
	n->vuse = new_vuse;

      bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
				 bswap_type, load_type, n, bswap);
      gcc_assert (bswap_res);
    }

  gimple *stmt = NULL;
  split_store *split_store;
  unsigned int i;
  auto_vec<gimple *, 32> orig_stmts;
  gimple_seq this_seq;
  tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
				      is_gimple_mem_ref_addr, NULL_TREE);
  gimple_seq_add_seq_without_update (&seq, this_seq);

  tree load_addr[2] = { NULL_TREE, NULL_TREE };
  gimple_seq load_seq[2] = { NULL, NULL };
  gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
  for (int j = 0; j < 2; ++j)
    {
      store_operand_info &op = group->stores[0]->ops[j];
      if (op.base_addr == NULL_TREE)
	continue;

      store_immediate_info *infol = group->stores.last ();
      if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
	{
	  /* We can't pick the location randomly; while we've verified
	     all the loads have the same vuse, they can be still in different
	     basic blocks and we need to pick the one from the last bb:
	     otherwise if we put the wider load at the q[0] load, we might
	     segfault if q[1] is not mapped.  */
	  basic_block bb = gimple_bb (op.stmt);
	  gimple *ostmt = op.stmt;
	  store_immediate_info *info;
	  FOR_EACH_VEC_ELT (group->stores, i, info)
	    {
	      gimple *tstmt = info->ops[j].stmt;
	      basic_block tbb = gimple_bb (tstmt);
	      if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
		{
		  ostmt = tstmt;
		  bb = tbb;
		}
	    }
	  load_gsi[j] = gsi_for_stmt (ostmt);
	  load_addr[j]
	    = force_gimple_operand_1 (unshare_expr (op.base_addr),
				      &load_seq[j], is_gimple_mem_ref_addr,
				      NULL_TREE);
	}
      else if (operand_equal_p (base_addr, op.base_addr, 0))
	load_addr[j] = addr;
      else
	{
	  load_addr[j]
	    = force_gimple_operand_1 (unshare_expr (op.base_addr),
				      &this_seq, is_gimple_mem_ref_addr,
				      NULL_TREE);
	  gimple_seq_add_seq_without_update (&seq, this_seq);
	}
    }

  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    {
      unsigned HOST_WIDE_INT try_size = split_store->size;
      unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
      unsigned HOST_WIDE_INT align = split_store->align;
      tree dest, src;
      location_t loc;
      if (split_store->orig)
	{
	  /* If there is just a single constituent store which covers
	     the whole area, just reuse the lhs and rhs.  */
	  gimple *orig_stmt = split_store->orig_stores[0]->stmt;
	  dest = gimple_assign_lhs (orig_stmt);
	  src = gimple_assign_rhs1 (orig_stmt);
	  loc = gimple_location (orig_stmt);
	}
      else
	{
	  store_immediate_info *info;
	  unsigned short clique, base;
	  unsigned int k;
	  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
	    orig_stmts.safe_push (info->stmt);
	  tree offset_type
	    = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
	  loc = get_location_for_stmts (orig_stmts);
	  orig_stmts.truncate (0);

	  tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
	  int_type = build_aligned_type (int_type, align);
	  dest = fold_build2 (MEM_REF, int_type, addr,
			      build_int_cst (offset_type, try_pos));
	  if (TREE_CODE (dest) == MEM_REF)
	    {
	      MR_DEPENDENCE_CLIQUE (dest) = clique;
	      MR_DEPENDENCE_BASE (dest) = base;
	    }

	  tree mask = integer_zero_node;
	  if (!bswap_res)
	    mask = native_interpret_expr (int_type,
					  group->mask + try_pos
					  - start_byte_pos,
					  group->buf_size);

	  tree ops[2];
	  for (int j = 0;
	       j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
	       ++j)
	    {
	      store_operand_info &op = split_store->orig_stores[0]->ops[j];
	      if (bswap_res)
		ops[j] = bswap_res;
	      else if (op.base_addr)
		{
		  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
		    orig_stmts.safe_push (info->ops[j].stmt);

		  offset_type = get_alias_type_for_stmts (orig_stmts, true,
							  &clique, &base);
		  location_t load_loc = get_location_for_stmts (orig_stmts);
		  orig_stmts.truncate (0);

		  unsigned HOST_WIDE_INT load_align = group->load_align[j];
		  unsigned HOST_WIDE_INT align_bitpos
		    = (try_pos * BITS_PER_UNIT
		       - split_store->orig_stores[0]->bitpos
		       + op.bitpos) & (load_align - 1);
		  if (align_bitpos)
		    load_align = least_bit_hwi (align_bitpos);

		  tree load_int_type
		    = build_nonstandard_integer_type (try_size, UNSIGNED);
		  load_int_type
		    = build_aligned_type (load_int_type, load_align);

		  unsigned HOST_WIDE_INT load_pos
		    = (try_pos * BITS_PER_UNIT
		       - split_store->orig_stores[0]->bitpos
		       + op.bitpos) / BITS_PER_UNIT;
		  ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
					build_int_cst (offset_type, load_pos));
		  if (TREE_CODE (ops[j]) == MEM_REF)
		    {
		      MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
		      MR_DEPENDENCE_BASE (ops[j]) = base;
		    }
		  if (!integer_zerop (mask))
		    /* The load might load some bits (that will be masked off
		       later on) uninitialized, avoid -W*uninitialized
		       warnings in that case.  */
		    TREE_NO_WARNING (ops[j]) = 1;

		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      ops[j]);
		  gimple_set_location (stmt, load_loc);
		  if (gsi_bb (load_gsi[j]))
		    {
		      gimple_set_vuse (stmt, gimple_vuse (op.stmt));
		      gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
		    }
		  else
		    {
		      gimple_set_vuse (stmt, new_vuse);
		      gimple_seq_add_stmt_without_update (&seq, stmt);
		    }
		  ops[j] = gimple_assign_lhs (stmt);
		  tree xor_mask;
		  enum tree_code inv_op
		    = invert_op (split_store, j, int_type, xor_mask);
		  if (inv_op != NOP_EXPR)
		    {
		      stmt = gimple_build_assign (make_ssa_name (int_type),
						  inv_op, ops[j], xor_mask);
		      gimple_set_location (stmt, load_loc);
		      ops[j] = gimple_assign_lhs (stmt);

		      if (gsi_bb (load_gsi[j]))
			gimple_seq_add_stmt_without_update (&load_seq[j],
							    stmt);
		      else
			gimple_seq_add_stmt_without_update (&seq, stmt);
		    }
		}
	      else
		ops[j] = native_interpret_expr (int_type,
						group->val + try_pos
						- start_byte_pos,
						group->buf_size);
	    }

	  switch (split_store->orig_stores[0]->rhs_code)
	    {
	    case BIT_AND_EXPR:
	    case BIT_IOR_EXPR:
	    case BIT_XOR_EXPR:
	      FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
		{
		  tree rhs1 = gimple_assign_rhs1 (info->stmt);
		  orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
		}
	      location_t bit_loc;
	      bit_loc = get_location_for_stmts (orig_stmts);
	      orig_stmts.truncate (0);

	      stmt
		= gimple_build_assign (make_ssa_name (int_type),
				       split_store->orig_stores[0]->rhs_code,
				       ops[0], ops[1]);
	      gimple_set_location (stmt, bit_loc);
	      /* If there is just one load and there is a separate
		 load_seq[0], emit the bitwise op right after it.  */
	      if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
	      /* Otherwise, if at least one load is in seq, we need to
		 emit the bitwise op right before the store.  If there
		 are two loads and are emitted somewhere else, it would
		 be better to emit the bitwise op as early as possible;
		 we don't track where that would be possible right now
		 though.  */
	      else
		gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	      tree xor_mask;
	      enum tree_code inv_op;
	      inv_op = invert_op (split_store, 2, int_type, xor_mask);
	      if (inv_op != NOP_EXPR)
		{
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      inv_op, src, xor_mask);
		  gimple_set_location (stmt, bit_loc);
		  if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		    gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
		  else
		    gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      break;
	    case LROTATE_EXPR:
	    case NOP_EXPR:
	      src = bswap_res;
	      if (!is_gimple_val (src))
		{
		  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
					      src);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
		{
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      NOP_EXPR, src);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      break;
	    default:
	      src = ops[0];
	      break;
	    }

	  if (!integer_zerop (mask))
	    {
	      tree tem = make_ssa_name (int_type);
	      tree load_src = unshare_expr (dest);
	      /* The load might load some or all bits uninitialized,
		 avoid -W*uninitialized warnings in that case.
		 As optimization, it would be nice if all the bits are
		 provably uninitialized (no stores at all yet or previous
		 store a CLOBBER) we'd optimize away the load and replace
		 it e.g. with 0.  */
	      TREE_NO_WARNING (load_src) = 1;
	      stmt = gimple_build_assign (tem, load_src);
	      gimple_set_location (stmt, loc);
	      gimple_set_vuse (stmt, new_vuse);
	      gimple_seq_add_stmt_without_update (&seq, stmt);

	      /* FIXME: If there is a single chunk of zero bits in mask,
		 perhaps use BIT_INSERT_EXPR instead?  */
	      stmt = gimple_build_assign (make_ssa_name (int_type),
					  BIT_AND_EXPR, tem, mask);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      tem = gimple_assign_lhs (stmt);

	      if (TREE_CODE (src) == INTEGER_CST)
		src = wide_int_to_tree (int_type,
					wi::bit_and_not (wi::to_wide (src),
							 wi::to_wide (mask)));
	      else
		{
		  tree nmask
		    = wide_int_to_tree (int_type,
					wi::bit_not (wi::to_wide (mask)));
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      BIT_AND_EXPR, src, nmask);
		  gimple_set_location (stmt, loc);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      stmt = gimple_build_assign (make_ssa_name (int_type),
					  BIT_IOR_EXPR, tem, src);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	    }
	}

      stmt = gimple_build_assign (dest, src);
      gimple_set_location (stmt, loc);
      gimple_set_vuse (stmt, new_vuse);
      gimple_seq_add_stmt_without_update (&seq, stmt);

      tree new_vdef;
      if (i < split_stores.length () - 1)
	new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
      else
	new_vdef = last_vdef;
      gimple_set_vdef (stmt, new_vdef);
      SSA_NAME_DEF_STMT (new_vdef) = stmt;
      new_vuse = new_vdef;
    }

  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    delete split_store;

  gcc_assert (seq);
  if (dump_file)
    {
      fprintf (dump_file,
	       "New sequence of %u stmts to replace old one of %u stmts\n",
	       split_stores.length (), orig_num_stmts);
      if (dump_flags & TDF_DETAILS)
	print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
    }
  gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
  for (int j = 0; j < 2; ++j)
    if (load_seq[j])
      gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);

  return true;
}
/* Process the merged_store_group objects created in the coalescing phase.
   The stores are all against the base object BASE.
   Try to output the widened stores and delete the original statements if
   successful.  Return true iff any changes were made.  */

bool
imm_store_chain_info::output_merged_stores ()
{
  unsigned int i;
  merged_store_group *merged_store;
  bool ret = false;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
    {
      if (output_merged_store (merged_store))
	{
	  unsigned int j;
	  store_immediate_info *store;
	  FOR_EACH_VEC_ELT (merged_store->stores, j, store)
	    {
	      gimple *stmt = store->stmt;
	      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	      gsi_remove (&gsi, true);
	      if (stmt != merged_store->last_stmt)
		{
		  unlink_stmt_vdef (stmt);
		  release_defs (stmt);
		}
	    }
	  ret = true;
	}
    }
  if (ret && dump_file)
    fprintf (dump_file, "Merging successful!\n");

  return ret;
}
/* Coalesce the store_immediate_info objects recorded against the base object
   BASE in the first phase and output them.
   Delete the allocated structures.
   Return true if any changes were made.  */

bool
imm_store_chain_info::terminate_and_process_chain ()
{
  /* Process store chain.  */
  bool ret = false;
  if (m_store_info.length () > 1)
    {
      ret = coalesce_immediate_stores ();
      if (ret)
	ret = output_merged_stores ();
    }

  /* Delete all the entries we allocated ourselves.  */
  store_immediate_info *info;
  unsigned int i;
  FOR_EACH_VEC_ELT (m_store_info, i, info)
    delete info;

  merged_store_group *merged_info;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
    delete merged_info;

  return ret;
}
/* Return true iff LHS is a destination potentially interesting for
   store merging.  In practice these are the codes that get_inner_reference
   can process.  */

static bool
lhs_valid_for_store_merging_p (tree lhs)
{
  tree_code code = TREE_CODE (lhs);

  if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
      || code == COMPONENT_REF || code == BIT_FIELD_REF)
    return true;

  return false;
}
/* Return true if the tree RHS is a constant we want to consider
   during store merging.  In practice accept all codes that
   native_encode_expr accepts.  */

static bool
rhs_valid_for_store_merging_p (tree rhs)
{
  return native_encode_expr (rhs, NULL,
			     GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs)))) != 0;
}
/* If MEM is a memory reference usable for store merging (either as
   store destination or for loads), return the non-NULL base_addr
   and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
   Otherwise return NULL, *PBITPOS should be still valid even for that
   case.  */

static tree
mem_valid_for_store_merging (tree mem, unsigned HOST_WIDE_INT *pbitsize,
			     unsigned HOST_WIDE_INT *pbitpos,
			     unsigned HOST_WIDE_INT *pbitregion_start,
			     unsigned HOST_WIDE_INT *pbitregion_end)
{
  HOST_WIDE_INT bitsize;
  HOST_WIDE_INT bitpos;
  unsigned HOST_WIDE_INT bitregion_start = 0;
  unsigned HOST_WIDE_INT bitregion_end = 0;
  machine_mode mode;
  int unsignedp = 0, reversep = 0, volatilep = 0;
  tree offset;
  tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
					&unsignedp, &reversep, &volatilep);
  *pbitsize = bitsize;

  if (TREE_CODE (mem) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
    {
      get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
      if (bitregion_end)
	++bitregion_end;
    }

  /* We do not want to rewrite TARGET_MEM_REFs.  */
  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    return NULL_TREE;
  /* In some cases get_inner_reference may return a
     MEM_REF [ptr + byteoffset].  For the purposes of this pass
     canonicalize the base_addr to MEM_REF [ptr] and take
     byteoffset into account in the bitpos.  This occurs in
     PR 23684 and this way we can catch more chains.  */
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      offset_int bit_off, byte_off = mem_ref_offset (base_addr);
      bit_off = byte_off << LOG2_BITS_PER_UNIT;
      bit_off += bitpos;
      if (!wi::neg_p (bit_off) && wi::fits_shwi_p (bit_off))
	{
	  bitpos = bit_off.to_shwi ();
	  if (bitregion_end)
	    {
	      bit_off = byte_off << LOG2_BITS_PER_UNIT;
	      bit_off += bitregion_start;
	      if (wi::fits_uhwi_p (bit_off))
		{
		  bitregion_start = bit_off.to_uhwi ();
		  bit_off = byte_off << LOG2_BITS_PER_UNIT;
		  bit_off += bitregion_end;
		  if (wi::fits_uhwi_p (bit_off))
		    bitregion_end = bit_off.to_uhwi ();
		}
	    }
	}
      else
	return NULL_TREE;
      base_addr = TREE_OPERAND (base_addr, 0);
    }
  /* get_inner_reference returns the base object, get at its
     address now.  */
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!bitregion_end)
    {
      bitregion_start = ROUND_DOWN (bitpos, BITS_PER_UNIT);
      bitregion_end = ROUND_UP (bitpos + bitsize, BITS_PER_UNIT);
    }

  if (offset != NULL_TREE)
    {
      /* If the access is variable offset then a base decl has to be
	 address-taken to be able to emit pointer-based stores to it.
	 ??? We might be able to get away with re-using the original
	 base up to the first variable part and then wrapping that inside
	 a BIT_FIELD_REF.  */
      tree base = get_base_address (base_addr);
      if (!base
	  || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
	return NULL_TREE;

      base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
			  base_addr, offset);
    }

  *pbitsize = bitsize;
  *pbitpos = bitpos;
  *pbitregion_start = bitregion_start;
  *pbitregion_end = bitregion_end;
  return base_addr;
}
/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */

static bool
handled_load (gimple *stmt, store_operand_info *op,
	      unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitpos,
	      unsigned HOST_WIDE_INT bitregion_start,
	      unsigned HOST_WIDE_INT bitregion_end)
{
  if (!is_gimple_assign (stmt))
    return false;
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
	  && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
			   bitregion_start, bitregion_end))
	{
	  /* Don't allow _1 = load; _2 = ~1; _3 = ~_2; which should have
	     been optimized earlier, but if allowed here, would confuse the
	     multiple uses counting.  */
	  if (op->bit_not_p)
	    return false;
	  op->bit_not_p = !op->bit_not_p;
	  return true;
	}
      return false;
    }
  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (stmt)
      && !gimple_has_volatile_ops (stmt))
    {
      tree mem = gimple_assign_rhs1 (stmt);
      op->base_addr
	= mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
				       &op->bitregion_start,
				       &op->bitregion_end);
      if (op->base_addr != NULL_TREE
	  && op->bitsize == bitsize
	  && ((op->bitpos - bitpos) % BITS_PER_UNIT) == 0
	  && op->bitpos - op->bitregion_start >= bitpos - bitregion_start
	  && op->bitregion_end - op->bitpos >= bitregion_end - bitpos)
	{
	  op->stmt = stmt;
	  op->val = mem;
	  op->bit_not_p = false;
	  return true;
	}
    }
  return false;
}
/* Record the store STMT for store merging optimization if it can be
   optimized.  */

void
pass_store_merging::process_store (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  unsigned HOST_WIDE_INT bitsize, bitpos;
  unsigned HOST_WIDE_INT bitregion_start;
  unsigned HOST_WIDE_INT bitregion_end;
  tree base_addr
    = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
				   &bitregion_start, &bitregion_end);

  bool invalid = (base_addr == NULL_TREE
		  || ((bitsize > MAX_BITSIZE_MODE_ANY_INT)
		      && (TREE_CODE (rhs) != INTEGER_CST)));
  enum tree_code rhs_code = ERROR_MARK;
  bool bit_not_p = false;
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  store_operand_info ops[2];
  if (invalid)
    ;
  else if (rhs_valid_for_store_merging_p (rhs))
    {
      rhs_code = INTEGER_CST;
      ops[0].val = rhs;
    }
  else if (TREE_CODE (rhs) != SSA_NAME)
    invalid = true;
  else
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
      if (!is_gimple_assign (def_stmt))
	invalid = true;
      else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
			     bitregion_start, bitregion_end))
	rhs_code = MEM_REF;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
	    {
	      bit_not_p = true;
	      def_stmt = SSA_NAME_DEF_STMT (rhs1);
	    }
	}
      if (rhs_code == ERROR_MARK && !invalid)
	switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
	  {
	  case BIT_AND_EXPR:
	  case BIT_IOR_EXPR:
	  case BIT_XOR_EXPR:
	    tree rhs1, rhs2;
	    rhs1 = gimple_assign_rhs1 (def_stmt);
	    rhs2 = gimple_assign_rhs2 (def_stmt);
	    invalid = true;
	    if (TREE_CODE (rhs1) != SSA_NAME)
	      break;
	    def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
	    if (!is_gimple_assign (def_stmt1)
		|| !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
				  bitregion_start, bitregion_end))
	      break;
	    if (rhs_valid_for_store_merging_p (rhs2))
	      ops[1].val = rhs2;
	    else if (TREE_CODE (rhs2) != SSA_NAME)
	      break;
	    else
	      {
		def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
		if (!is_gimple_assign (def_stmt2))
		  break;
		else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
					bitregion_start, bitregion_end))
		  break;
	      }
	    invalid = false;
	    break;
	  default:
	    invalid = true;
	    break;
	  }

      if ((bitsize % BITS_PER_UNIT) == 0
	  && (bitpos % BITS_PER_UNIT) == 0
	  && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
	{
	  ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
	  if (ins_stmt)
	    {
	      uint64_t nn = n.n;
	      for (unsigned HOST_WIDE_INT i = 0;
		   i < bitsize; i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
		if ((nn & MARKER_MASK) == 0
		    || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
		  {
		    ins_stmt = NULL;
		    break;
		  }
	      if (ins_stmt && invalid)
		{
		  rhs_code = LROTATE_EXPR;
		  ops[0].base_addr = NULL_TREE;
		  ops[1].base_addr = NULL_TREE;
		  invalid = false;
		}
	    }
	}
    }

  if (invalid)
    {
      terminate_all_aliasing_chains (NULL, stmt);
      return;
    }

  if (!ins_stmt)
    memset (&n, 0, sizeof (n));

  struct imm_store_chain_info **chain_info = NULL;
  if (base_addr)
    chain_info = m_stores.get (base_addr);

  store_immediate_info *info;
  if (chain_info)
    {
      unsigned int ord = (*chain_info)->m_store_info.length ();
      info = new store_immediate_info (bitsize, bitpos, bitregion_start,
				       bitregion_end, stmt, ord, rhs_code,
				       n, ins_stmt,
				       bit_not_p, ops[0], ops[1]);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Recording immediate store from stmt:\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	}
      (*chain_info)->m_store_info.safe_push (info);
      terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain terminate and
	 process the chain now.  */
      if ((*chain_info)->m_store_info.length ()
	  == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Reached maximum number of statements to merge:\n");
	  terminate_and_release_chain (*chain_info);
	}
      return;
    }

  /* Store aliases any existing chain?  */
  terminate_all_aliasing_chains (NULL, stmt);
  /* Start a new chain.  */
  struct imm_store_chain_info *new_chain
    = new imm_store_chain_info (m_stores_head, base_addr);
  info = new store_immediate_info (bitsize, bitpos, bitregion_start,
				   bitregion_end, stmt, 0, rhs_code,
				   n, ins_stmt,
				   bit_not_p, ops[0], ops[1]);
  new_chain->m_store_info.safe_push (info);
  m_stores.put (base_addr, new_chain);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Starting new chain with statement:\n");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "The base object is:\n");
      print_generic_expr (dump_file, base_addr);
      fprintf (dump_file, "\n");
    }
}

/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the
   widened variants.  */

unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      unsigned HOST_WIDE_INT num_statements = 0;
      /* Quickly check whether the block contains at least two non-debug
	 statements; a block with fewer cannot hold a pair of stores to
	 merge, so skip it.  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  if (is_gimple_debug (gsi_stmt (gsi)))
	    continue;

	  if (++num_statements >= 2)
	    break;
	}

      if (num_statements < 2)
	continue;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_debug (stmt))
	    continue;

	  if (gimple_has_volatile_ops (stmt))
	    {
	      /* Terminate all chains.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Volatile access terminates "
				    "all chains\n");
	      terminate_and_process_all_chains ();
	      continue;
	    }
	  if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
	      && !stmt_can_throw_internal (stmt)
	      && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
	    process_store (stmt);
	  else
	    terminate_all_aliasing_chains (NULL, stmt);
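	  /* For example, a simple member store such as
	       s.a = 5;
	     is recorded through process_store, whereas a statement that may
	     clobber memory, e.g. a call
	       foo (&s);
	     only terminates the chains it might alias.  */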
	}
      terminate_and_process_all_chains ();
    }
  return 0;
}

} // anon namespace

/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}
#if CHECKING_P

namespace selftest {

/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N
   are equal.  */
static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
	{
	  fprintf (stderr, "Arrays do not match.  X:\n");
	  dump_char_array (stderr, x, n);
	  fprintf (stderr, "Y:\n");
	  dump_char_array (stderr, y, n);
	}
      ASSERT_EQ (x[i], y[i]);
    }
}
/* Test shift_bytes_in_array and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array (void)
{
   /* byte 1   | byte 0
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));
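  /* Read as a 16-bit little-endian value the input is 0x1fe0, and
     0x1fe0 << 2 == 0x7f80, i.e. the bytes { 0x80, 0x7f } checked above.  */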
  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
/* Test shift_bytes_in_array_right and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_right (void)
{
   /* byte 1   | byte 0
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0};
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
  unsigned char expected[2] = { 0x07, 0xf8};
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));
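  /* Here the bytes are in big-endian order, so the input is again 0x1fe0
     and 0x1fe0 >> 2 == 0x07f8, i.e. the bytes { 0x07, 0xf8 } checked
     above.  */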
  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
/* Test clear_bit_region that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff};
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);
  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
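  /* Bit 0 is the least significant bit of in[0], so after clearing 22 bits
     starting at bit 1 only that bit and the most significant bit of in[2]
     remain set.  */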
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}
/* Test clear_bit_region_be that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff};
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);
  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
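  /* In the big-endian numbering bit BITS_PER_UNIT - 1 is the most
     significant bit of in[0], so here only that bit and the least
     significant bit of in[2] survive the clearing.  */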
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}
/* Run all of the selftests within this file.  */

void
store_merging_c_tests (void)
{
  verify_shift_bytes_in_array ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}
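/* These tests are expected to be picked up by the GCC selftest framework
   (selftest::run_tests), e.g. as part of the "make selftest" step of a
   bootstrap.  */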
} // namespace selftest

#endif /* CHECKING_P.  */