gcc/gimple-ssa-store-merging.cc
1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* The purpose of the store merging pass is to combine multiple memory stores
22 of constant values, values loaded from memory, bitwise operations on those,
23 or bit-field values, to consecutive locations, into fewer wider stores.
25 For example, if we have a sequence performing four single-byte stores to
26 consecutive memory locations:
27 [p ] := imm1;
28 [p + 1B] := imm2;
29 [p + 2B] := imm3;
30 [p + 3B] := imm4;
31 we can transform this into a single 4-byte store if the target supports it:
32 [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.
34 Or:
35 [p ] := [q ];
36 [p + 1B] := [q + 1B];
37 [p + 2B] := [q + 2B];
38 [p + 3B] := [q + 3B];
39 if there is no overlap, this can be transformed into a single 4-byte
40 load followed by a single 4-byte store.
42 Or:
43 [p ] := [q ] ^ imm1;
44 [p + 1B] := [q + 1B] ^ imm2;
45 [p + 2B] := [q + 2B] ^ imm3;
46 [p + 3B] := [q + 3B] ^ imm4;
47 if there is no overlap, this can be transformed into a single 4-byte
48 load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
50 Or:
51 [p:1 ] := imm;
52 [p:31] := val & 0x7FFFFFFF;
53 we can transform this into a single 4-byte store if the target supports it:
54 [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
56 The algorithm is applied to each basic block in three phases:
58 1) Scan through the basic block and record assignments to destinations
59 that can be expressed as a store to memory of a certain size at a certain
60 bit offset from base expressions we can handle. For bit-fields we also
61 record the surrounding bit region, i.e. bits that could be stored in
62 a read-modify-write operation when storing the bit-field. Record store
63 chains to different bases in a hash_map (m_stores) and make sure to
64 terminate such chains when appropriate (for example when the stored
65 values get used subsequently).
66 These stores can be a result of structure element initializers, array stores
67 etc. A store_immediate_info object is recorded for every such store.
68 Record as many such assignments to a single base as possible until a
69 statement that interferes with the store sequence is encountered.
70 Each store has up to 2 operands, which can be either a constant, a memory
71 load or an SSA name, from which the value to be stored can be computed.
72 At most one of the operands can be a constant. The operands are recorded
73 in a store_operand_info struct.
75 2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
76 store_immediate_info objects) and coalesce contiguous stores into
77 merged_store_group objects. For bit-field stores, we don't need to
78 require the stores to be contiguous, just their surrounding bit regions
79 have to be contiguous. If the expression being stored is different
80 between adjacent stores, such as one store storing a constant and the
81 following one storing a value loaded from memory, or if the loaded memory
82 objects are not adjacent, a new merged_store_group is created as well.
84 For example, given the stores:
85 [p ] := 0;
86 [p + 1B] := 1;
87 [p + 3B] := 0;
88 [p + 4B] := 1;
89 [p + 5B] := 0;
90 [p + 6B] := 0;
91 This phase would produce two merged_store_group objects, one recording the
92 two bytes stored in the memory region [p : p + 1] and another
93 recording the four bytes stored in the memory region [p + 3 : p + 6].
95 3) The merged_store_group objects produced in phase 2) are processed
96 to generate the sequence of wider stores that set the contiguous memory
97 regions to the sequence of bytes that corresponds to them. This may emit
98 multiple stores per store group to handle contiguous stores that are not
99 of a size that is a power of 2. For example it can try to emit a 40-bit
100 store as a 32-bit store followed by an 8-bit store.
101 We try to emit stores as wide as we can while respecting STRICT_ALIGNMENT
102 or TARGET_SLOW_UNALIGNED_ACCESS settings.
104 Note on endianness and example:
105 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
106 [p ] := 0x1234;
107 [p + 2B] := 0x5678;
108 [p + 4B] := 0xab;
109 [p + 5B] := 0xcd;
111 The memory layout for little-endian (LE) and big-endian (BE) must be:
112 p |LE|BE|
113 ---------
114 0 |34|12|
115 1 |12|34|
116 2 |78|56|
117 3 |56|78|
118 4 |ab|ab|
119 5 |cd|cd|
121 To merge these into a single 48-bit merged value 'val' in phase 2)
122 on little-endian we insert stores to higher (consecutive) bitpositions
123 into the most significant bits of the merged value.
124 The final merged value would be: 0xcdab56781234
126 For big-endian we insert stores to higher bitpositions into the least
127 significant bits of the merged value.
128 The final merged value would be: 0x12345678abcd
130 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
131 followed by a 16-bit store. Again, we must consider endianness when
132 breaking down the 48-bit value 'val' computed above.
133 For little endian we emit:
134 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
135 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
137 Whereas for big-endian we emit:
138 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
139 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
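/* As a concrete illustration (hypothetical user code with made-up constants),
   on a little-endian target a function such as

     void
     set_header (unsigned char *p)
     {
       p[0] = 0x34;
       p[1] = 0x12;
       p[2] = 0x78;
       p[3] = 0x56;
     }

   can be merged by this pass into a single 32-bit store of the constant
   0x56781234 (bytes concatenated according to endianness), provided the
   target allows such an access at the known alignment.  */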
141 #include "config.h"
142 #include "system.h"
143 #include "coretypes.h"
144 #include "backend.h"
145 #include "tree.h"
146 #include "gimple.h"
147 #include "builtins.h"
148 #include "fold-const.h"
149 #include "tree-pass.h"
150 #include "ssa.h"
151 #include "gimple-pretty-print.h"
152 #include "alias.h"
153 #include "fold-const.h"
154 #include "print-tree.h"
155 #include "tree-hash-traits.h"
156 #include "gimple-iterator.h"
157 #include "gimplify.h"
158 #include "gimple-fold.h"
159 #include "stor-layout.h"
160 #include "timevar.h"
161 #include "cfganal.h"
162 #include "cfgcleanup.h"
163 #include "tree-cfg.h"
164 #include "except.h"
165 #include "tree-eh.h"
166 #include "target.h"
167 #include "gimplify-me.h"
168 #include "rtl.h"
169 #include "expr.h" /* For get_bit_range. */
170 #include "optabs-tree.h"
171 #include "dbgcnt.h"
172 #include "selftest.h"
174 /* The maximum size (in bits) of the stores this pass should generate. */
175 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
176 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
178 /* Limit to bound the number of aliasing checks for loads with the same
179 vuse as the corresponding store. */
180 #define MAX_STORE_ALIAS_CHECKS 64
182 namespace {
184 struct bswap_stat
186 /* Number of hand-written 16-bit nop / bswaps found. */
187 int found_16bit;
189 /* Number of hand-written 32-bit nop / bswaps found. */
190 int found_32bit;
192 /* Number of hand-written 64-bit nop / bswaps found. */
193 int found_64bit;
194 } nop_stats, bswap_stats;
196 /* A symbolic number structure is used to detect byte permutation and selection
197 patterns of a source. To achieve that, its field N contains an artificial
198 number consisting of BITS_PER_MARKER sized markers tracking where each
199 byte comes from in the source:
201 0 - target byte has the value 0
202 FF - target byte has an unknown value (e.g. due to sign extension)
203 1..size - marker value is the byte index in the source (0 for lsb).
205 To detect permutations on memory sources (arrays and structures), a symbolic
206 number is also associated:
207 - a base address BASE_ADDR and an OFFSET giving the address of the source;
208 - a range which gives the difference between the highest and lowest accessed
209 memory location to make such a symbolic number;
210 - the address SRC of the source element of lowest address as a convenience
211 to easily get BASE_ADDR + offset + lowest bytepos;
212 - the number of expressions N_OPS bitwise ORed together, to represent the
213 approximate cost of the computation.
215 Note 1: the range is different from size as size reflects the size of the
216 type of the current expression. For instance, for an array char a[],
217 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
218 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
219 time a range of 1.
221 Note 2: for non-memory sources, range holds the same value as size.
223 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
225 struct symbolic_number {
226 uint64_t n;
227 tree type;
228 tree base_addr;
229 tree offset;
230 poly_int64_pod bytepos;
231 tree src;
232 tree alias_set;
233 tree vuse;
234 unsigned HOST_WIDE_INT range;
235 int n_ops;
238 #define BITS_PER_MARKER 8
239 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
240 #define MARKER_BYTE_UNKNOWN MARKER_MASK
241 #define HEAD_MARKER(n, size) \
242 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
244 /* The number which the find_bswap_or_nop_1 result should match in
245 order to have a nop. The number is masked according to the size of
246 the symbolic number before using it. */
247 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
248 (uint64_t)0x08070605 << 32 | 0x04030201)
250 /* The number which the find_bswap_or_nop_1 result should match in
251 order to have a byte swap. The number is masked according to the
252 size of the symbolic number before using it. */
253 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
254 (uint64_t)0x01020304 << 32 | 0x05060708)
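/* A minimal worked example of the two constants above, assuming 8-bit bytes
   and a 4-byte (32-bit) symbolic number; the variable names are illustrative
   only:

     uint64_t cmpnop32  = CMPNOP & (((uint64_t) 1 << 32) - 1);  // 0x04030201
     uint64_t cmpxchg32 = CMPXCHG >> 32;                        // 0x01020304

   0x04030201 says byte i of the result comes from byte i of the source (an
   identity, i.e. a nop), while 0x01020304 says the least significant result
   byte comes from the most significant source byte and so on (a full byte
   swap).  find_bswap_or_nop_finalize below applies this kind of masking and
   shifting to *CMPNOP and *CMPXCHG based on N->range.  */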
256 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
257 number N. Return false if the requested operation is not permitted
258 on a symbolic number. */
260 inline bool
261 do_shift_rotate (enum tree_code code,
262 struct symbolic_number *n,
263 int count)
265 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
266 uint64_t head_marker;
268 if (count < 0
269 || count >= TYPE_PRECISION (n->type)
270 || count % BITS_PER_UNIT != 0)
271 return false;
272 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
274 /* Zero out the extra bits of N in order to avoid them being shifted
275 into the significant bits. */
276 if (size < 64 / BITS_PER_MARKER)
277 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
279 switch (code)
281 case LSHIFT_EXPR:
282 n->n <<= count;
283 break;
284 case RSHIFT_EXPR:
285 head_marker = HEAD_MARKER (n->n, size);
286 n->n >>= count;
287 /* Arithmetic shift of signed type: result is dependent on the value. */
288 if (!TYPE_UNSIGNED (n->type) && head_marker)
289 for (i = 0; i < count / BITS_PER_MARKER; i++)
290 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
291 << ((size - 1 - i) * BITS_PER_MARKER);
292 break;
293 case LROTATE_EXPR:
294 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
295 break;
296 case RROTATE_EXPR:
297 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
298 break;
299 default:
300 return false;
302 /* Zero unused bits for size. */
303 if (size < 64 / BITS_PER_MARKER)
304 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
305 return true;
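/* For illustration (assuming an unsigned 32-bit type with identity markers
   0x04030201): a logical right shift by 8 bits is rescaled to one marker
   position, leaving n->n == 0x00040302, i.e. the low three result bytes come
   from source bytes 2, 3 and 4 and the top byte is known to be zero.  */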
308 /* Perform sanity checking for the symbolic number N and the gimple
309 statement STMT. */
311 inline bool
312 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
314 tree lhs_type;
316 lhs_type = TREE_TYPE (gimple_get_lhs (stmt));
318 if (TREE_CODE (lhs_type) != INTEGER_TYPE
319 && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
320 return false;
322 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
323 return false;
325 return true;
328 /* Initialize the symbolic number N for the bswap pass from the base element
329 SRC manipulated by the bitwise OR expression. */
331 bool
332 init_symbolic_number (struct symbolic_number *n, tree src)
334 int size;
336 if (!INTEGRAL_TYPE_P (TREE_TYPE (src)) && !POINTER_TYPE_P (TREE_TYPE (src)))
337 return false;
339 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
340 n->src = src;
342 /* Set up the symbolic number N by setting each byte to a value between 1 and
343 the byte size of rhs1. The highest order byte is set to that size and the
344 lowest order byte to 1. */
345 n->type = TREE_TYPE (src);
346 size = TYPE_PRECISION (n->type);
347 if (size % BITS_PER_UNIT != 0)
348 return false;
349 size /= BITS_PER_UNIT;
350 if (size > 64 / BITS_PER_MARKER)
351 return false;
352 n->range = size;
353 n->n = CMPNOP;
354 n->n_ops = 1;
356 if (size < 64 / BITS_PER_MARKER)
357 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
359 return true;
362 /* Check if STMT might be a byte swap or a nop from a memory source and return
363 the answer. If so, REF is that memory source and the base of the memory area
364 accessed and the offset of the access from that base are recorded in N. */
366 bool
367 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
369 /* Leaf node is an array or component ref. Memorize its base and
370 offset from base to compare to other such leaf node. */
371 poly_int64 bitsize, bitpos, bytepos;
372 machine_mode mode;
373 int unsignedp, reversep, volatilep;
374 tree offset, base_addr;
376 /* Not prepared to handle PDP endian. */
377 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
378 return false;
380 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
381 return false;
383 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
384 &unsignedp, &reversep, &volatilep);
386 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
387 /* Do not rewrite TARGET_MEM_REF. */
388 return false;
389 else if (TREE_CODE (base_addr) == MEM_REF)
391 poly_offset_int bit_offset = 0;
392 tree off = TREE_OPERAND (base_addr, 1);
394 if (!integer_zerop (off))
396 poly_offset_int boff = mem_ref_offset (base_addr);
397 boff <<= LOG2_BITS_PER_UNIT;
398 bit_offset += boff;
401 base_addr = TREE_OPERAND (base_addr, 0);
403 /* Avoid returning a negative bitpos as this may wreak havoc later. */
404 if (maybe_lt (bit_offset, 0))
406 tree byte_offset = wide_int_to_tree
407 (sizetype, bits_to_bytes_round_down (bit_offset));
408 bit_offset = num_trailing_bits (bit_offset);
409 if (offset)
410 offset = size_binop (PLUS_EXPR, offset, byte_offset);
411 else
412 offset = byte_offset;
415 bitpos += bit_offset.force_shwi ();
417 else
418 base_addr = build_fold_addr_expr (base_addr);
420 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
421 return false;
422 if (!multiple_p (bitsize, BITS_PER_UNIT))
423 return false;
424 if (reversep)
425 return false;
427 if (!init_symbolic_number (n, ref))
428 return false;
429 n->base_addr = base_addr;
430 n->offset = offset;
431 n->bytepos = bytepos;
432 n->alias_set = reference_alias_ptr_type (ref);
433 n->vuse = gimple_vuse (stmt);
434 return true;
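/* For example (illustrative only): for a load like *(uint32_t *) (p + 4) on
   a target with 8-bit bytes this records base_addr = p, offset = NULL,
   bytepos = 4, n->n = 0x04030201 (the 4-byte identity pattern) and range = 4,
   plus the alias set and VUSE of the load.  */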
437 /* Compute the symbolic number N representing the result of a bitwise OR,
438 bitwise XOR or plus on 2 symbolic numbers N1 and N2 whose source statements
439 are respectively SOURCE_STMT1 and SOURCE_STMT2. CODE is the operation. */
441 gimple *
442 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
443 gimple *source_stmt2, struct symbolic_number *n2,
444 struct symbolic_number *n, enum tree_code code)
446 int i, size;
447 uint64_t mask;
448 gimple *source_stmt;
449 struct symbolic_number *n_start;
451 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
452 if (TREE_CODE (rhs1) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
454 rhs1 = TREE_OPERAND (rhs1, 0);
455 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
456 if (TREE_CODE (rhs2) == BIT_FIELD_REF
457 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
458 rhs2 = TREE_OPERAND (rhs2, 0);
460 /* Sources are different, cancel bswap if they are not memory locations with
461 the same base (array, structure, ...). */
462 if (rhs1 != rhs2)
464 uint64_t inc;
465 HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
466 struct symbolic_number *toinc_n_ptr, *n_end;
467 basic_block bb1, bb2;
469 if (!n1->base_addr || !n2->base_addr
470 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
471 return NULL;
473 if (!n1->offset != !n2->offset
474 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
475 return NULL;
477 start1 = 0;
478 if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
479 return NULL;
481 if (start1 < start2)
483 n_start = n1;
484 start_sub = start2 - start1;
486 else
488 n_start = n2;
489 start_sub = start1 - start2;
492 bb1 = gimple_bb (source_stmt1);
493 bb2 = gimple_bb (source_stmt2);
494 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
495 source_stmt = source_stmt1;
496 else
497 source_stmt = source_stmt2;
499 /* Find the highest address at which a load is performed and
500 compute related info. */
501 end1 = start1 + (n1->range - 1);
502 end2 = start2 + (n2->range - 1);
503 if (end1 < end2)
505 end = end2;
506 end_sub = end2 - end1;
508 else
510 end = end1;
511 end_sub = end1 - end2;
513 n_end = (end2 > end1) ? n2 : n1;
515 /* Find symbolic number whose lsb is the most significant. */
516 if (BYTES_BIG_ENDIAN)
517 toinc_n_ptr = (n_end == n1) ? n2 : n1;
518 else
519 toinc_n_ptr = (n_start == n1) ? n2 : n1;
521 n->range = end - MIN (start1, start2) + 1;
523 /* Check that the range of memory covered can be represented by
524 a symbolic number. */
525 if (n->range > 64 / BITS_PER_MARKER)
526 return NULL;
528 /* Reinterpret byte marks in symbolic number holding the value of
529 bigger weight according to target endianness. */
530 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
531 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
532 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
534 unsigned marker
535 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
536 if (marker && marker != MARKER_BYTE_UNKNOWN)
537 toinc_n_ptr->n += inc;
540 else
542 n->range = n1->range;
543 n_start = n1;
544 source_stmt = source_stmt1;
547 if (!n1->alias_set
548 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
549 n->alias_set = n1->alias_set;
550 else
551 n->alias_set = ptr_type_node;
552 n->vuse = n_start->vuse;
553 n->base_addr = n_start->base_addr;
554 n->offset = n_start->offset;
555 n->src = n_start->src;
556 n->bytepos = n_start->bytepos;
557 n->type = n_start->type;
558 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
559 uint64_t res_n = n1->n | n2->n;
561 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
563 uint64_t masked1, masked2;
565 masked1 = n1->n & mask;
566 masked2 = n2->n & mask;
567 /* If at least one byte is 0, all of 0 | x == 0 ^ x == 0 + x == x. */
568 if (masked1 && masked2)
570 /* + can carry into upper bits, just punt. */
571 if (code == PLUS_EXPR)
572 return NULL;
573 /* x | x is still x. */
574 if (code == BIT_IOR_EXPR && masked1 == masked2)
575 continue;
576 if (code == BIT_XOR_EXPR)
578 /* x ^ x is 0, but MARKER_BYTE_UNKNOWN stands for
579 unknown values and unknown ^ unknown is unknown. */
580 if (masked1 == masked2
581 && masked1 != ((uint64_t) MARKER_BYTE_UNKNOWN
582 << i * BITS_PER_MARKER))
584 res_n &= ~mask;
585 continue;
588 /* Otherwise set the byte to unknown, it might still be
589 later masked off. */
590 res_n |= mask;
593 n->n = res_n;
594 n->n_ops = n1->n_ops + n2->n_ops;
596 return source_stmt;
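/* A worked example of the merge, assuming a little-endian target, 8-bit
   bytes and hypothetical user code such as

     uint32_t
     load32 (const uint16_t *q)
     {
       return (uint32_t) q[0] | ((uint32_t) q[1] << 16);
     }

   The first load yields markers 0x0201 and the second, after the shift,
   0x02010000.  Their byte distance is 2, so each nonzero marker of the
   higher load is incremented by 2 (giving 0x04030000) before being ORed
   with 0x0201, which produces 0x04030201: the 4-byte identity pattern,
   later turned into a single 32-bit load.  */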
599 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
600 the operation given by the rhs of STMT on the result. If the operation
601 could be executed successfully, the function returns a gimple stmt whose
602 rhs's first tree is the expression of the source operand, and NULL
603 otherwise. */
605 gimple *
606 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
608 enum tree_code code;
609 tree rhs1, rhs2 = NULL;
610 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
611 enum gimple_rhs_class rhs_class;
613 if (!limit || !is_gimple_assign (stmt))
614 return NULL;
616 rhs1 = gimple_assign_rhs1 (stmt);
618 if (find_bswap_or_nop_load (stmt, rhs1, n))
619 return stmt;
621 /* Handle BIT_FIELD_REF. */
622 if (TREE_CODE (rhs1) == BIT_FIELD_REF
623 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
625 if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
626 || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
627 return NULL;
629 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
630 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
631 if (bitpos % BITS_PER_UNIT == 0
632 && bitsize % BITS_PER_UNIT == 0
633 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
635 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
636 if (BYTES_BIG_ENDIAN)
637 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
639 /* Shift. */
640 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
641 return NULL;
643 /* Mask. */
644 uint64_t mask = 0;
645 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
646 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
647 i++, tmp <<= BITS_PER_UNIT)
648 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
649 n->n &= mask;
651 /* Convert. */
652 n->type = TREE_TYPE (rhs1);
653 if (!n->base_addr)
654 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
656 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
659 return NULL;
662 if (TREE_CODE (rhs1) != SSA_NAME)
663 return NULL;
665 code = gimple_assign_rhs_code (stmt);
666 rhs_class = gimple_assign_rhs_class (stmt);
667 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
669 if (rhs_class == GIMPLE_BINARY_RHS)
670 rhs2 = gimple_assign_rhs2 (stmt);
672 /* Handle unary rhs and binary rhs with integer constants as second
673 operand. */
675 if (rhs_class == GIMPLE_UNARY_RHS
676 || (rhs_class == GIMPLE_BINARY_RHS
677 && TREE_CODE (rhs2) == INTEGER_CST))
679 if (code != BIT_AND_EXPR
680 && code != LSHIFT_EXPR
681 && code != RSHIFT_EXPR
682 && code != LROTATE_EXPR
683 && code != RROTATE_EXPR
684 && !CONVERT_EXPR_CODE_P (code))
685 return NULL;
687 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
689 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
690 we have to initialize the symbolic number. */
691 if (!source_stmt1)
693 if (gimple_assign_load_p (stmt)
694 || !init_symbolic_number (n, rhs1))
695 return NULL;
696 source_stmt1 = stmt;
699 switch (code)
701 case BIT_AND_EXPR:
703 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
704 uint64_t val = int_cst_value (rhs2), mask = 0;
705 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
707 /* Only constants masking full bytes are allowed. */
708 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
709 if ((val & tmp) != 0 && (val & tmp) != tmp)
710 return NULL;
711 else if (val & tmp)
712 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
714 n->n &= mask;
716 break;
717 case LSHIFT_EXPR:
718 case RSHIFT_EXPR:
719 case LROTATE_EXPR:
720 case RROTATE_EXPR:
721 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
722 return NULL;
723 break;
724 CASE_CONVERT:
726 int i, type_size, old_type_size;
727 tree type;
729 type = TREE_TYPE (gimple_assign_lhs (stmt));
730 type_size = TYPE_PRECISION (type);
731 if (type_size % BITS_PER_UNIT != 0)
732 return NULL;
733 type_size /= BITS_PER_UNIT;
734 if (type_size > 64 / BITS_PER_MARKER)
735 return NULL;
737 /* Sign extension: result is dependent on the value. */
738 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
739 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
740 && HEAD_MARKER (n->n, old_type_size))
741 for (i = 0; i < type_size - old_type_size; i++)
742 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
743 << ((type_size - 1 - i) * BITS_PER_MARKER);
745 if (type_size < 64 / BITS_PER_MARKER)
747 /* If STMT casts to a smaller type mask out the bits not
748 belonging to the target type. */
749 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
751 n->type = type;
752 if (!n->base_addr)
753 n->range = type_size;
755 break;
756 default:
757 return NULL;
759 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
762 /* Handle binary rhs. */
764 if (rhs_class == GIMPLE_BINARY_RHS)
766 struct symbolic_number n1, n2;
767 gimple *source_stmt, *source_stmt2;
769 if (!rhs2 || TREE_CODE (rhs2) != SSA_NAME)
770 return NULL;
772 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
774 switch (code)
776 case BIT_IOR_EXPR:
777 case BIT_XOR_EXPR:
778 case PLUS_EXPR:
779 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
781 if (!source_stmt1)
782 return NULL;
784 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
786 if (!source_stmt2)
787 return NULL;
789 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
790 return NULL;
792 if (n1.vuse != n2.vuse)
793 return NULL;
795 source_stmt
796 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n,
797 code);
799 if (!source_stmt)
800 return NULL;
802 if (!verify_symbolic_number_p (n, stmt))
803 return NULL;
805 break;
806 default:
807 return NULL;
809 return source_stmt;
811 return NULL;
814 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
815 *CMPXCHG, *CMPNOP and adjust *N. */
817 void
818 find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
819 uint64_t *cmpnop, bool *cast64_to_32)
821 unsigned rsize;
822 uint64_t tmpn, mask;
824 /* The number which the find_bswap_or_nop_1 result should match in order
825 to have a full byte swap. The number is shifted to the right
826 according to the size of the symbolic number before using it. */
827 *cmpxchg = CMPXCHG;
828 *cmpnop = CMPNOP;
829 *cast64_to_32 = false;
831 /* Find real size of result (highest non-zero byte). */
832 if (n->base_addr)
833 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
834 else
835 rsize = n->range;
837 /* Zero out the bits corresponding to untouched bytes in original gimple
838 expression. */
839 if (n->range < (int) sizeof (int64_t))
841 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
842 if (n->base_addr == NULL
843 && n->range == 4
844 && int_size_in_bytes (TREE_TYPE (n->src)) == 8)
846 /* If all bytes in n->n are either 0 or in [5..8] range, this
847 might be a candidate for (unsigned) __builtin_bswap64 (src).
848 It is not worth it for (unsigned short) __builtin_bswap64 (src)
849 or (unsigned short) __builtin_bswap32 (src). */
850 *cast64_to_32 = true;
851 for (tmpn = n->n; tmpn; tmpn >>= BITS_PER_MARKER)
852 if ((tmpn & MARKER_MASK)
853 && ((tmpn & MARKER_MASK) <= 4 || (tmpn & MARKER_MASK) > 8))
855 *cast64_to_32 = false;
856 break;
859 if (*cast64_to_32)
860 *cmpxchg &= mask;
861 else
862 *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
863 *cmpnop &= mask;
866 /* Zero out the bits corresponding to unused bytes in the result of the
867 gimple expression. */
868 if (rsize < n->range)
870 if (BYTES_BIG_ENDIAN)
872 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
873 *cmpxchg &= mask;
874 if (n->range - rsize == sizeof (int64_t))
875 *cmpnop = 0;
876 else
877 *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
879 else
881 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
882 if (n->range - rsize == sizeof (int64_t))
883 *cmpxchg = 0;
884 else
885 *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
886 *cmpnop &= mask;
888 n->range = rsize;
891 if (*cast64_to_32)
892 n->range = 8;
893 n->range *= BITS_PER_UNIT;
896 /* Helper function for find_bswap_or_nop.
897 Return true if N is a swap or nop with MASK. */
898 static bool
899 is_bswap_or_nop_p (uint64_t n, uint64_t cmpxchg,
900 uint64_t cmpnop, uint64_t* mask,
901 bool* bswap)
903 *mask = ~(uint64_t) 0;
904 if (n == cmpnop)
905 *bswap = false;
906 else if (n == cmpxchg)
907 *bswap = true;
908 else
910 int set = 0;
911 for (uint64_t msk = MARKER_MASK; msk; msk <<= BITS_PER_MARKER)
912 if ((n & msk) == 0)
913 *mask &= ~msk;
914 else if ((n & msk) == (cmpxchg & msk))
915 set++;
916 else
917 return false;
919 if (set < 2)
920 return false;
921 *bswap = true;
923 return true;
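/* For illustration, with a 32-bit unsigned X an expression like

     ((x & 0xff) << 24) | ((x & 0xff00) << 8) | ((x & 0xff0000) >> 8)

   yields N == 0x01020300: the low result byte is known to be zero and the
   other three bytes match the bswap pattern, so *BSWAP is set and *MASK
   keeps only the matching bytes; the replacement then becomes a bswap
   followed by a mask (conceptually __builtin_bswap32 (x) & 0xffffff00).  */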
927 /* Check if STMT completes a bswap implementation or a read in a given
928 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
929 accordingly. It also sets N to represent the kind of operations
930 performed: size of the resulting expression and whether it works on
931 a memory source, and if so its alias-set and vuse. Finally, the
932 function returns a stmt whose rhs's first tree is the source
933 expression. */
935 gimple *
936 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap,
937 bool *cast64_to_32, uint64_t *mask, uint64_t* l_rotate)
939 tree type_size = TYPE_SIZE_UNIT (TREE_TYPE (gimple_get_lhs (stmt)));
940 if (!tree_fits_uhwi_p (type_size))
941 return NULL;
943 /* The last parameter determines the search depth limit. It usually
944 correlates directly to the number n of bytes to be touched. We
945 increase that number by 2 * (log2(n) + 1) here in order to also
946 cover signed -> unsigned conversions of the src operand as can be seen
947 in libgcc, and for initial shift/and operation of the src operand. */
948 int limit = tree_to_uhwi (type_size);
949 limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
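  /* E.g. (illustrative arithmetic only): for an 8-byte result the budget is
     8 + 2 * (1 + ceil_log2 (8)) = 8 + 2 * 4 = 16 statements of search depth.  */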
950 gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
952 if (!ins_stmt)
954 if (gimple_assign_rhs_code (stmt) != CONSTRUCTOR
955 || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
956 return NULL;
957 unsigned HOST_WIDE_INT sz = tree_to_uhwi (type_size) * BITS_PER_UNIT;
958 if (sz != 16 && sz != 32 && sz != 64)
959 return NULL;
960 tree rhs = gimple_assign_rhs1 (stmt);
961 if (CONSTRUCTOR_NELTS (rhs) == 0)
962 return NULL;
963 tree eltype = TREE_TYPE (TREE_TYPE (rhs));
964 unsigned HOST_WIDE_INT eltsz
965 = int_size_in_bytes (eltype) * BITS_PER_UNIT;
966 if (TYPE_PRECISION (eltype) != eltsz)
967 return NULL;
968 constructor_elt *elt;
969 unsigned int i;
970 tree type = build_nonstandard_integer_type (sz, 1);
971 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (rhs), i, elt)
973 if (TREE_CODE (elt->value) != SSA_NAME
974 || !INTEGRAL_TYPE_P (TREE_TYPE (elt->value)))
975 return NULL;
976 struct symbolic_number n1;
977 gimple *source_stmt
978 = find_bswap_or_nop_1 (SSA_NAME_DEF_STMT (elt->value), &n1,
979 limit - 1);
981 if (!source_stmt)
982 return NULL;
984 n1.type = type;
985 if (!n1.base_addr)
986 n1.range = sz / BITS_PER_UNIT;
988 if (i == 0)
990 ins_stmt = source_stmt;
991 *n = n1;
993 else
995 if (n->vuse != n1.vuse)
996 return NULL;
998 struct symbolic_number n0 = *n;
1000 if (!BYTES_BIG_ENDIAN)
1002 if (!do_shift_rotate (LSHIFT_EXPR, &n1, i * eltsz))
1003 return NULL;
1005 else if (!do_shift_rotate (LSHIFT_EXPR, &n0, eltsz))
1006 return NULL;
1007 ins_stmt
1008 = perform_symbolic_merge (ins_stmt, &n0, source_stmt, &n1, n,
1009 BIT_IOR_EXPR);
1011 if (!ins_stmt)
1012 return NULL;
1017 uint64_t cmpxchg, cmpnop;
1018 uint64_t orig_range = n->range * BITS_PER_UNIT;
1019 find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop, cast64_to_32);
1021 /* A complete byte swap should make the symbolic number start with
1022 the largest digit in the highest order byte. An unchanged symbolic
1023 number indicates a read with the same endianness as the target architecture. */
1024 *l_rotate = 0;
1025 uint64_t tmp_n = n->n;
1026 if (!is_bswap_or_nop_p (tmp_n, cmpxchg, cmpnop, mask, bswap))
1028 /* Try bswap + lrotate. */
1029 /* TODO: handle cast64_to_32 and big/little_endian memory
1030 source when rsize < range. */
1031 if (n->range == orig_range
1032 && ((orig_range == 32
1033 && optab_handler (rotl_optab, SImode) != CODE_FOR_nothing)
1034 || (orig_range == 64
1035 && optab_handler (rotl_optab, DImode) != CODE_FOR_nothing))
1036 && (tmp_n & MARKER_MASK) < orig_range / BITS_PER_UNIT)
1038 uint64_t range = (orig_range / BITS_PER_UNIT) * BITS_PER_MARKER;
1039 uint64_t count = (tmp_n & MARKER_MASK) * BITS_PER_MARKER;
1040 /* I.e. handle 0x0203040506070800 when the lower byte is zero. */
1041 if (!count)
1043 for (uint64_t i = 1; i != range / BITS_PER_MARKER; i++)
1045 count = (tmp_n >> i * BITS_PER_MARKER) & MARKER_MASK;
1046 if (count)
1048 /* Count should be meaningful, not 0xff. */
1049 if (count <= range / BITS_PER_MARKER)
1051 count = (count + i) * BITS_PER_MARKER % range;
1052 break;
1054 else
1055 return NULL;
1059 tmp_n = tmp_n >> count | tmp_n << (range - count);
1060 if (orig_range == 32)
1061 tmp_n &= (1ULL << 32) - 1;
1062 if (!is_bswap_or_nop_p (tmp_n, cmpxchg, cmpnop, mask, bswap))
1063 return NULL;
1064 *l_rotate = count / BITS_PER_MARKER * BITS_PER_UNIT;
1065 gcc_assert (*bswap);
1067 else
1068 return NULL;
1071 /* Useless bit manipulation performed by code. */
1072 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
1073 return NULL;
1075 return ins_stmt;
1078 const pass_data pass_data_optimize_bswap =
1080 GIMPLE_PASS, /* type */
1081 "bswap", /* name */
1082 OPTGROUP_NONE, /* optinfo_flags */
1083 TV_NONE, /* tv_id */
1084 PROP_ssa, /* properties_required */
1085 0, /* properties_provided */
1086 0, /* properties_destroyed */
1087 0, /* todo_flags_start */
1088 0, /* todo_flags_finish */
1091 class pass_optimize_bswap : public gimple_opt_pass
1093 public:
1094 pass_optimize_bswap (gcc::context *ctxt)
1095 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
1098 /* opt_pass methods: */
1099 bool gate (function *) final override
1101 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
1104 unsigned int execute (function *) final override;
1106 }; // class pass_optimize_bswap
1108 /* Helper function for bswap_replace. Build VIEW_CONVERT_EXPR from
1109 VAL to TYPE. If VAL has a different type size, emit a NOP_EXPR cast
1110 first. */
1112 static tree
1113 bswap_view_convert (gimple_stmt_iterator *gsi, tree type, tree val,
1114 bool before)
1116 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (val))
1117 || POINTER_TYPE_P (TREE_TYPE (val)));
1118 if (TYPE_SIZE (type) != TYPE_SIZE (TREE_TYPE (val)))
1120 HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_SIZE (type));
1121 if (POINTER_TYPE_P (TREE_TYPE (val)))
1123 gimple *g
1124 = gimple_build_assign (make_ssa_name (pointer_sized_int_node),
1125 NOP_EXPR, val);
1126 if (before)
1127 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1128 else
1129 gsi_insert_after (gsi, g, GSI_NEW_STMT);
1130 val = gimple_assign_lhs (g);
1132 tree itype = build_nonstandard_integer_type (prec, 1);
1133 gimple *g = gimple_build_assign (make_ssa_name (itype), NOP_EXPR, val);
1134 if (before)
1135 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1136 else
1137 gsi_insert_after (gsi, g, GSI_NEW_STMT);
1138 val = gimple_assign_lhs (g);
1140 return build1 (VIEW_CONVERT_EXPR, type, val);
1143 /* Perform the bswap optimization: replace the expression computed in the rhs
1144 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
1145 bswap, load or load + bswap expression.
1146 Which of these alternatives replaces the rhs is given by N->base_addr (non
1147 null if a load is needed) and BSWAP. The type, VUSE and alias-set of the
1148 load to perform are also given in N while the builtin bswap invocation is
1149 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of the
1150 load statements involved to construct the rhs in gsi_stmt (GSI) and
1151 N->range gives the size of the rhs expression for maintaining some
1152 statistics.
1154 Note that if the replacement involves a load and if gsi_stmt (GSI) is
1155 non-NULL, that stmt is moved just after INS_STMT to do the load with the
1156 same VUSE, which can lead to gsi_stmt (GSI) changing basic block. */
1158 tree
1159 bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
1160 tree bswap_type, tree load_type, struct symbolic_number *n,
1161 bool bswap, uint64_t mask, uint64_t l_rotate)
1163 tree src, tmp, tgt = NULL_TREE;
1164 gimple *bswap_stmt, *mask_stmt = NULL, *rotl_stmt = NULL;
1165 tree_code conv_code = NOP_EXPR;
1167 gimple *cur_stmt = gsi_stmt (gsi);
1168 src = n->src;
1169 if (cur_stmt)
1171 tgt = gimple_assign_lhs (cur_stmt);
1172 if (gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR
1173 && tgt
1174 && VECTOR_TYPE_P (TREE_TYPE (tgt)))
1175 conv_code = VIEW_CONVERT_EXPR;
1178 /* Need to load the value from memory first. */
1179 if (n->base_addr)
1181 gimple_stmt_iterator gsi_ins = gsi;
1182 if (ins_stmt)
1183 gsi_ins = gsi_for_stmt (ins_stmt);
1184 tree addr_expr, addr_tmp, val_expr, val_tmp;
1185 tree load_offset_ptr, aligned_load_type;
1186 gimple *load_stmt;
1187 unsigned align = get_object_alignment (src);
1188 poly_int64 load_offset = 0;
1190 if (cur_stmt)
1192 basic_block ins_bb = gimple_bb (ins_stmt);
1193 basic_block cur_bb = gimple_bb (cur_stmt);
1194 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
1195 return NULL_TREE;
1197 /* Move cur_stmt just before one of the loads of the original
1198 to ensure it has the same VUSE. See PR61517 for what could
1199 go wrong. */
1200 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
1201 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
1202 gsi_move_before (&gsi, &gsi_ins);
1203 gsi = gsi_for_stmt (cur_stmt);
1205 else
1206 gsi = gsi_ins;
1208 /* Compute address to load from and cast according to the size
1209 of the load. */
1210 addr_expr = build_fold_addr_expr (src);
1211 if (is_gimple_mem_ref_addr (addr_expr))
1212 addr_tmp = unshare_expr (addr_expr);
1213 else
1215 addr_tmp = unshare_expr (n->base_addr);
1216 if (!is_gimple_mem_ref_addr (addr_tmp))
1217 addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
1218 is_gimple_mem_ref_addr,
1219 NULL_TREE, true,
1220 GSI_SAME_STMT);
1221 load_offset = n->bytepos;
1222 if (n->offset)
1224 tree off
1225 = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
1226 true, NULL_TREE, true,
1227 GSI_SAME_STMT);
1228 gimple *stmt
1229 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
1230 POINTER_PLUS_EXPR, addr_tmp, off);
1231 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1232 addr_tmp = gimple_assign_lhs (stmt);
1236 /* Perform the load. */
1237 aligned_load_type = load_type;
1238 if (align < TYPE_ALIGN (load_type))
1239 aligned_load_type = build_aligned_type (load_type, align);
1240 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
1241 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
1242 load_offset_ptr);
1244 if (!bswap)
1246 if (n->range == 16)
1247 nop_stats.found_16bit++;
1248 else if (n->range == 32)
1249 nop_stats.found_32bit++;
1250 else
1252 gcc_assert (n->range == 64);
1253 nop_stats.found_64bit++;
1256 /* Convert the result of load if necessary. */
1257 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
1259 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
1260 "load_dst");
1261 load_stmt = gimple_build_assign (val_tmp, val_expr);
1262 gimple_set_vuse (load_stmt, n->vuse);
1263 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1264 if (conv_code == VIEW_CONVERT_EXPR)
1265 val_tmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), val_tmp,
1266 true);
1267 gimple_assign_set_rhs_with_ops (&gsi, conv_code, val_tmp);
1268 update_stmt (cur_stmt);
1270 else if (cur_stmt)
1272 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
1273 gimple_set_vuse (cur_stmt, n->vuse);
1274 update_stmt (cur_stmt);
1276 else
1278 tgt = make_ssa_name (load_type);
1279 cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
1280 gimple_set_vuse (cur_stmt, n->vuse);
1281 gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
1284 if (dump_file)
1286 fprintf (dump_file,
1287 "%d bit load in target endianness found at: ",
1288 (int) n->range);
1289 print_gimple_stmt (dump_file, cur_stmt, 0);
1291 return tgt;
1293 else
1295 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
1296 load_stmt = gimple_build_assign (val_tmp, val_expr);
1297 gimple_set_vuse (load_stmt, n->vuse);
1298 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1300 src = val_tmp;
1302 else if (!bswap)
1304 gimple *g = NULL;
1305 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
1307 if (!is_gimple_val (src))
1308 return NULL_TREE;
1309 if (conv_code == VIEW_CONVERT_EXPR)
1310 src = bswap_view_convert (&gsi, TREE_TYPE (tgt), src, true);
1311 g = gimple_build_assign (tgt, conv_code, src);
1313 else if (cur_stmt)
1314 g = gimple_build_assign (tgt, src);
1315 else
1316 tgt = src;
1317 if (n->range == 16)
1318 nop_stats.found_16bit++;
1319 else if (n->range == 32)
1320 nop_stats.found_32bit++;
1321 else
1323 gcc_assert (n->range == 64);
1324 nop_stats.found_64bit++;
1326 if (dump_file)
1328 fprintf (dump_file,
1329 "%d bit reshuffle in target endianness found at: ",
1330 (int) n->range);
1331 if (cur_stmt)
1332 print_gimple_stmt (dump_file, cur_stmt, 0);
1333 else
1335 print_generic_expr (dump_file, tgt, TDF_NONE);
1336 fprintf (dump_file, "\n");
1339 if (cur_stmt)
1340 gsi_replace (&gsi, g, true);
1341 return tgt;
1343 else if (TREE_CODE (src) == BIT_FIELD_REF)
1344 src = TREE_OPERAND (src, 0);
1346 if (n->range == 16)
1347 bswap_stats.found_16bit++;
1348 else if (n->range == 32)
1349 bswap_stats.found_32bit++;
1350 else
1352 gcc_assert (n->range == 64);
1353 bswap_stats.found_64bit++;
1356 tmp = src;
1358 /* Convert the src expression if necessary. */
1359 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
1361 gimple *convert_stmt;
1363 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1364 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
1365 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1368 /* Canonical form for 16 bit bswap is a rotate expression. Only 16-bit values
1369 are considered, as rotation of 2N-bit values by N bits is generally not
1370 equivalent to a bswap. Consider for instance 0x01020304 r>> 16 which
1371 gives 0x03040102 while a bswap for that value is 0x04030201. */
1372 if (bswap && n->range == 16)
1374 tree count = build_int_cst (NULL, BITS_PER_UNIT);
1375 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
1376 bswap_stmt = gimple_build_assign (NULL, src);
1378 else
1379 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
1381 if (tgt == NULL_TREE)
1382 tgt = make_ssa_name (bswap_type);
1383 tmp = tgt;
1385 if (mask != ~(uint64_t) 0)
1387 tree m = build_int_cst (bswap_type, mask);
1388 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1389 gimple_set_lhs (bswap_stmt, tmp);
1390 mask_stmt = gimple_build_assign (tgt, BIT_AND_EXPR, tmp, m);
1391 tmp = tgt;
1394 if (l_rotate)
1396 tree m = build_int_cst (bswap_type, l_rotate);
1397 tmp = make_temp_ssa_name (bswap_type, NULL,
1398 mask_stmt ? "bswapmaskdst" : "bswapdst");
1399 gimple_set_lhs (mask_stmt ? mask_stmt : bswap_stmt, tmp);
1400 rotl_stmt = gimple_build_assign (tgt, LROTATE_EXPR, tmp, m);
1401 tmp = tgt;
1404 /* Convert the result if necessary. */
1405 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
1407 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1408 tree atmp = tmp;
1409 gimple_stmt_iterator gsi2 = gsi;
1410 if (conv_code == VIEW_CONVERT_EXPR)
1411 atmp = bswap_view_convert (&gsi2, TREE_TYPE (tgt), tmp, false);
1412 gimple *convert_stmt = gimple_build_assign (tgt, conv_code, atmp);
1413 gsi_insert_after (&gsi2, convert_stmt, GSI_SAME_STMT);
1416 gimple_set_lhs (rotl_stmt ? rotl_stmt
1417 : mask_stmt ? mask_stmt : bswap_stmt, tmp);
1419 if (dump_file)
1421 fprintf (dump_file, "%d bit bswap implementation found at: ",
1422 (int) n->range);
1423 if (cur_stmt)
1424 print_gimple_stmt (dump_file, cur_stmt, 0);
1425 else
1427 print_generic_expr (dump_file, tgt, TDF_NONE);
1428 fprintf (dump_file, "\n");
1432 if (cur_stmt)
1434 if (rotl_stmt)
1435 gsi_insert_after (&gsi, rotl_stmt, GSI_SAME_STMT);
1436 if (mask_stmt)
1437 gsi_insert_after (&gsi, mask_stmt, GSI_SAME_STMT);
1438 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
1439 gsi_remove (&gsi, true);
1441 else
1443 gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
1444 if (mask_stmt)
1445 gsi_insert_before (&gsi, mask_stmt, GSI_SAME_STMT);
1446 if (rotl_stmt)
1447 gsi_insert_after (&gsi, rotl_stmt, GSI_SAME_STMT);
1449 return tgt;
1452 /* Try to optimize an assignment CUR_STMT with CONSTRUCTOR on the rhs
1453 using bswap optimizations. CDI_DOMINATORS needs to be
1454 computed on entry. Return true if it has been optimized and
1455 TODO_update_ssa is needed. */
1457 static bool
1458 maybe_optimize_vector_constructor (gimple *cur_stmt)
1460 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1461 struct symbolic_number n;
1462 bool bswap;
1464 gcc_assert (is_gimple_assign (cur_stmt)
1465 && gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR);
1467 tree rhs = gimple_assign_rhs1 (cur_stmt);
1468 if (!VECTOR_TYPE_P (TREE_TYPE (rhs))
1469 || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
1470 || gimple_assign_lhs (cur_stmt) == NULL_TREE)
1471 return false;
1473 HOST_WIDE_INT sz = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
1474 switch (sz)
1476 case 16:
1477 load_type = bswap_type = uint16_type_node;
1478 break;
1479 case 32:
1480 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1481 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
1483 load_type = uint32_type_node;
1484 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1485 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1487 else
1488 return false;
1489 break;
1490 case 64:
1491 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1492 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1493 || (word_mode == SImode
1494 && builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1495 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)))
1497 load_type = uint64_type_node;
1498 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1499 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1501 else
1502 return false;
1503 break;
1504 default:
1505 return false;
1508 bool cast64_to_32;
1509 uint64_t mask, l_rotate;
1510 gimple *ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap,
1511 &cast64_to_32, &mask, &l_rotate);
1512 if (!ins_stmt
1513 || n.range != (unsigned HOST_WIDE_INT) sz
1514 || cast64_to_32
1515 || mask != ~(uint64_t) 0)
1516 return false;
1518 if (bswap && !fndecl && n.range != 16)
1519 return false;
1521 memset (&nop_stats, 0, sizeof (nop_stats));
1522 memset (&bswap_stats, 0, sizeof (bswap_stats));
1523 return bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1524 bswap_type, load_type, &n, bswap, mask,
1525 l_rotate) != NULL_TREE;
1528 /* Find manual byte swap implementations as well as loads in a given
1529 endianness. Byte swaps are turned into a bswap builtin invocation,
1530 while endian loads are converted to a bswap builtin invocation or a
1531 simple load according to the target endianness. */
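/* For illustration only (hypothetical user code, assuming <stdint.h> types):
   a manual 32-bit byte swap such as

     uint32_t
     my_bswap32 (uint32_t x)
     {
       return (x >> 24)
              | ((x >> 8) & 0x0000ff00)
              | ((x << 8) & 0x00ff0000)
              | (x << 24);
     }

   is the kind of pattern this pass matches and, when the target provides a
   bswap optab for SImode, replaces with a call to __builtin_bswap32.  */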
1533 unsigned int
1534 pass_optimize_bswap::execute (function *fun)
1536 basic_block bb;
1537 bool bswap32_p, bswap64_p;
1538 bool changed = false;
1539 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1541 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1542 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1543 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1544 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1545 || (bswap32_p && word_mode == SImode)));
1547 /* Determine the argument type of the builtins. The code later on
1548 assumes that the return and argument type are the same. */
1549 if (bswap32_p)
1551 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1552 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1555 if (bswap64_p)
1557 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1558 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1561 memset (&nop_stats, 0, sizeof (nop_stats));
1562 memset (&bswap_stats, 0, sizeof (bswap_stats));
1563 calculate_dominance_info (CDI_DOMINATORS);
1565 FOR_EACH_BB_FN (bb, fun)
1567 gimple_stmt_iterator gsi;
1569 /* We do a reverse scan for bswap patterns to make sure we get the
1570 widest match. As bswap pattern matching doesn't handle previously
1571 inserted smaller bswap replacements as sub-patterns, the wider
1572 variant wouldn't be detected. */
1573 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1575 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1576 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1577 enum tree_code code;
1578 struct symbolic_number n;
1579 bool bswap, cast64_to_32;
1580 uint64_t mask, l_rotate;
1582 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1583 might be moved to a different basic block by bswap_replace and gsi
1584 must not point to it if that's the case. Moving the gsi_prev
1585 there makes sure that gsi points to the statement previous to
1586 cur_stmt while still making sure that all statements are
1587 considered in this basic block. */
1588 gsi_prev (&gsi);
1590 if (!is_gimple_assign (cur_stmt))
1591 continue;
1593 code = gimple_assign_rhs_code (cur_stmt);
1594 switch (code)
1596 case LROTATE_EXPR:
1597 case RROTATE_EXPR:
1598 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
1599 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
1600 % BITS_PER_UNIT)
1601 continue;
1602 /* Fall through. */
1603 case BIT_IOR_EXPR:
1604 case BIT_XOR_EXPR:
1605 case PLUS_EXPR:
1606 break;
1607 case CONSTRUCTOR:
1609 tree rhs = gimple_assign_rhs1 (cur_stmt);
1610 if (VECTOR_TYPE_P (TREE_TYPE (rhs))
1611 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs))))
1612 break;
1614 continue;
1615 default:
1616 continue;
1619 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap,
1620 &cast64_to_32, &mask, &l_rotate);
1622 if (!ins_stmt)
1623 continue;
1625 switch (n.range)
1627 case 16:
1628 /* Already in canonical form, nothing to do. */
1629 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
1630 continue;
1631 load_type = bswap_type = uint16_type_node;
1632 break;
1633 case 32:
1634 load_type = uint32_type_node;
1635 if (bswap32_p)
1637 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1638 bswap_type = bswap32_type;
1640 break;
1641 case 64:
1642 load_type = uint64_type_node;
1643 if (bswap64_p)
1645 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1646 bswap_type = bswap64_type;
1648 break;
1649 default:
1650 continue;
1653 if (bswap && !fndecl && n.range != 16)
1654 continue;
1656 if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1657 bswap_type, load_type, &n, bswap, mask,
1658 l_rotate))
1659 changed = true;
1663 statistics_counter_event (fun, "16-bit nop implementations found",
1664 nop_stats.found_16bit);
1665 statistics_counter_event (fun, "32-bit nop implementations found",
1666 nop_stats.found_32bit);
1667 statistics_counter_event (fun, "64-bit nop implementations found",
1668 nop_stats.found_64bit);
1669 statistics_counter_event (fun, "16-bit bswap implementations found",
1670 bswap_stats.found_16bit);
1671 statistics_counter_event (fun, "32-bit bswap implementations found",
1672 bswap_stats.found_32bit);
1673 statistics_counter_event (fun, "64-bit bswap implementations found",
1674 bswap_stats.found_64bit);
1676 return (changed ? TODO_update_ssa : 0);
1679 } // anon namespace
1681 gimple_opt_pass *
1682 make_pass_optimize_bswap (gcc::context *ctxt)
1684 return new pass_optimize_bswap (ctxt);
1687 namespace {
1689 /* Struct recording one operand for the store, which is either a constant,
1690 then VAL represents the constant and all the other fields are zero, or
1691 a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1692 and the other fields also reflect the memory load, or an SSA name, then
1693 VAL represents the SSA name and all the other fields are zero. */
1695 class store_operand_info
1697 public:
1698 tree val;
1699 tree base_addr;
1700 poly_uint64 bitsize;
1701 poly_uint64 bitpos;
1702 poly_uint64 bitregion_start;
1703 poly_uint64 bitregion_end;
1704 gimple *stmt;
1705 bool bit_not_p;
1706 store_operand_info ();
1709 store_operand_info::store_operand_info ()
1710 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
1711 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
1715 /* Struct recording the information about a single store of an immediate
1716 to memory. These are created in the first phase and coalesced into
1717 merged_store_group objects in the second phase. */
1719 class store_immediate_info
1721 public:
1722 unsigned HOST_WIDE_INT bitsize;
1723 unsigned HOST_WIDE_INT bitpos;
1724 unsigned HOST_WIDE_INT bitregion_start;
1725 /* This is one past the last bit of the bit region. */
1726 unsigned HOST_WIDE_INT bitregion_end;
1727 gimple *stmt;
1728 unsigned int order;
1729 /* INTEGER_CST for constant store, STRING_CST for string store,
1730 MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation,
1731 BIT_INSERT_EXPR for bit insertion.
1732 LROTATE_EXPR if it can only be bswap optimized and
1733 ops are not really meaningful.
1734 NOP_EXPR if bswap optimization detected identity, ops
1735 are not meaningful. */
1736 enum tree_code rhs_code;
1737 /* Two fields for bswap optimization purposes. */
1738 struct symbolic_number n;
1739 gimple *ins_stmt;
1740 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1741 bool bit_not_p;
1742 /* True if ops have been swapped and thus ops[1] represents
1743 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1744 bool ops_swapped_p;
1745 /* The index number of the landing pad, or 0 if there is none. */
1746 int lp_nr;
1747 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1748 just the first one. */
1749 store_operand_info ops[2];
1750 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1751 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1752 gimple *, unsigned int, enum tree_code,
1753 struct symbolic_number &, gimple *, bool, int,
1754 const store_operand_info &,
1755 const store_operand_info &);
1758 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
1759 unsigned HOST_WIDE_INT bp,
1760 unsigned HOST_WIDE_INT brs,
1761 unsigned HOST_WIDE_INT bre,
1762 gimple *st,
1763 unsigned int ord,
1764 enum tree_code rhscode,
1765 struct symbolic_number &nr,
1766 gimple *ins_stmtp,
1767 bool bitnotp,
1768 int nr2,
1769 const store_operand_info &op0r,
1770 const store_operand_info &op1r)
1771 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
1772 stmt (st), order (ord), rhs_code (rhscode), n (nr),
1773 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
1774 lp_nr (nr2), ops { op0r, op1r }
1778 /* Struct representing a group of stores to contiguous memory locations.
1779 These are produced by the second phase (coalescing) and consumed in the
1780 third phase that outputs the widened stores. */
1782 class merged_store_group
1784 public:
1785 unsigned HOST_WIDE_INT start;
1786 unsigned HOST_WIDE_INT width;
1787 unsigned HOST_WIDE_INT bitregion_start;
1788 unsigned HOST_WIDE_INT bitregion_end;
1789 /* The size of the allocated memory for val and mask. */
1790 unsigned HOST_WIDE_INT buf_size;
1791 unsigned HOST_WIDE_INT align_base;
1792 poly_uint64 load_align_base[2];
1794 unsigned int align;
1795 unsigned int load_align[2];
1796 unsigned int first_order;
1797 unsigned int last_order;
1798 bool bit_insertion;
1799 bool string_concatenation;
1800 bool only_constants;
1801 bool consecutive;
1802 unsigned int first_nonmergeable_order;
1803 int lp_nr;
1805 auto_vec<store_immediate_info *> stores;
1806 /* We record the first and last original statements in the sequence because
1807 we'll need their vuse/vdef and replacement position. It's easier to keep
1808 track of them separately as 'stores' is reordered by apply_stores. */
1809 gimple *last_stmt;
1810 gimple *first_stmt;
1811 unsigned char *val;
1812 unsigned char *mask;
1814 merged_store_group (store_immediate_info *);
1815 ~merged_store_group ();
1816 bool can_be_merged_into (store_immediate_info *);
1817 void merge_into (store_immediate_info *);
1818 void merge_overlapping (store_immediate_info *);
1819 bool apply_stores ();
1820 private:
1821 void do_merge (store_immediate_info *);
1824 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1826 static void
1827 dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
1829 if (!fd)
1830 return;
1832 for (unsigned int i = 0; i < len; i++)
1833 fprintf (fd, "%02x ", ptr[i]);
1834 fprintf (fd, "\n");
1837 /* Clear out LEN bits starting from bit START in the byte array
1838 PTR. This clears the bits to the *right* of START.
1839 START must be within [0, BITS_PER_UNIT) and counts starting from
1840 the least significant bit. */
1842 static void
1843 clear_bit_region_be (unsigned char *ptr, unsigned int start,
1844 unsigned int len)
1846 if (len == 0)
1847 return;
1848 /* Clear len bits to the right of start. */
1849 else if (len <= start + 1)
1851 unsigned char mask = (~(~0U << len));
1852 mask = mask << (start + 1U - len);
1853 ptr[0] &= ~mask;
1855 else if (start != BITS_PER_UNIT - 1)
1857 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
1858 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
1859 len - (start % BITS_PER_UNIT) - 1);
1861 else if (start == BITS_PER_UNIT - 1
1862 && len > BITS_PER_UNIT)
1864 unsigned int nbytes = len / BITS_PER_UNIT;
1865 memset (ptr, 0, nbytes);
1866 if (len % BITS_PER_UNIT != 0)
1867 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
1868 len % BITS_PER_UNIT);
1870 else
1871 gcc_unreachable ();
1874 /* In the byte array PTR clear the bit region that starts at bit
1875    START and is LEN bits wide.
1876 For regions spanning multiple bytes do this recursively until we reach
1877 zero LEN or a region contained within a single byte. */
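/* For instance, clear_bit_region (ptr, 6, 5) clears bits 6 and 7 of ptr[0]
   and bits 0 through 2 of ptr[1], where bit 0 is the least significant bit
   of each byte.  */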
1879 static void
1880 clear_bit_region (unsigned char *ptr, unsigned int start,
1881 unsigned int len)
1883 /* Degenerate base case. */
1884 if (len == 0)
1885 return;
1886 else if (start >= BITS_PER_UNIT)
1887 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1888 /* Second base case. */
1889 else if ((start + len) <= BITS_PER_UNIT)
1891 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
1892 mask >>= BITS_PER_UNIT - (start + len);
1894 ptr[0] &= ~mask;
1896 return;
1898 /* Clear most significant bits in a byte and proceed with the next byte. */
1899 else if (start != 0)
1901 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
1902 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
1904 /* Whole bytes need to be cleared. */
1905 else if (start == 0 && len > BITS_PER_UNIT)
1907 unsigned int nbytes = len / BITS_PER_UNIT;
1908 /* We could recurse on each byte but we clear whole bytes, so a simple
1909 memset will do. */
1910 memset (ptr, '\0', nbytes);
1911 /* Clear the remaining sub-byte region if there is one. */
1912 if (len % BITS_PER_UNIT != 0)
1913 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1915 else
1916 gcc_unreachable ();
1919 /* Write BITLEN bits of EXPR to the byte array PTR at
1920 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1921 Return true if the operation succeeded. */
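/* As an illustrative example, on a little-endian target encoding the 16-bit
   constant 0x1234 at BITPOS 8 takes the fast path below and simply writes
   the bytes 0x34 and 0x12 at ptr[1] and ptr[2].  Only when BITLEN is not a
   multiple of BITS_PER_UNIT, BITPOS is not byte-aligned, or no integer mode
   of size BITLEN exists does the shifting machinery further down come into
   play.  */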
1923 static bool
1924 encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
1925 unsigned int total_bytes)
1927 unsigned int first_byte = bitpos / BITS_PER_UNIT;
1928 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1929 || (bitpos % BITS_PER_UNIT)
1930 || !int_mode_for_size (bitlen, 0).exists ());
1931 bool empty_ctor_p
1932 = (TREE_CODE (expr) == CONSTRUCTOR
1933 && CONSTRUCTOR_NELTS (expr) == 0
1934 && TYPE_SIZE_UNIT (TREE_TYPE (expr))
1935 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));
1937 if (!sub_byte_op_p)
1939 if (first_byte >= total_bytes)
1940 return false;
1941 total_bytes -= first_byte;
1942 if (empty_ctor_p)
1944 unsigned HOST_WIDE_INT rhs_bytes
1945 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1946 if (rhs_bytes > total_bytes)
1947 return false;
1948 memset (ptr + first_byte, '\0', rhs_bytes);
1949 return true;
1951 return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
1954 /* LITTLE-ENDIAN
1955 We are writing a non byte-sized quantity or at a position that is not
1956 at a byte boundary.
1957 |--------|--------|--------| ptr + first_byte
1959 xxx xxxxxxxx xxx< bp>
1960 |______EXPR____|
1962 First native_encode_expr EXPR into a temporary buffer and shift each
1963 byte in the buffer by 'bp' (carrying the bits over as necessary).
1964 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1965 <------bitlen---->< bp>
1966 Then we clear the destination bits:
1967 |---00000|00000000|000-----| ptr + first_byte
1968 <-------bitlen--->< bp>
1970 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1971 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1973 BIG-ENDIAN
1974 We are writing a non byte-sized quantity or at a position that is not
1975 at a byte boundary.
1976 ptr + first_byte |--------|--------|--------|
1978 <bp >xxx xxxxxxxx xxx
1979 |_____EXPR_____|
1981 First native_encode_expr EXPR into a temporary buffer and shift each
1982 byte in the buffer to the right (carrying the bits over as necessary).
1983 We shift by as much as needed to align the most significant bit of EXPR
1984 with bitpos:
1985 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1986 <---bitlen----> <bp ><-----bitlen----->
1987 Then we clear the destination bits:
1988 ptr + first_byte |-----000||00000000||00000---|
1989 <bp ><-------bitlen----->
1991 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1992 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1993 The awkwardness comes from the fact that bitpos is counted from the
1994 most significant bit of a byte. */
1996 /* We must be dealing with fixed-size data at this point, since the
1997 total size is also fixed. */
1998 unsigned int byte_size;
1999 if (empty_ctor_p)
2001 unsigned HOST_WIDE_INT rhs_bytes
2002 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
2003 if (rhs_bytes > total_bytes)
2004 return false;
2005 byte_size = rhs_bytes;
2007 else
2009 fixed_size_mode mode
2010 = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
2011 byte_size
2012 = mode == BLKmode
2013 ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)))
2014 : GET_MODE_SIZE (mode);
2016 /* Allocate an extra byte so that we have space to shift into. */
2017 byte_size++;
2018 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
2019 memset (tmpbuf, '\0', byte_size);
2020 /* The store detection code should only have allowed constants that are
2021 accepted by native_encode_expr or empty ctors. */
2022 if (!empty_ctor_p
2023 && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
2024 gcc_unreachable ();
2026 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
2027 bytes to write. This means it can write more than
2028 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
2029 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
2030 bitlen and zero out the bits that are not relevant as well (that may
2031 contain a sign bit due to sign-extension). */
2032 unsigned int padding
2033 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
2034 /* On big-endian the padding is at the 'front' so just skip the initial
2035 bytes. */
2036 if (BYTES_BIG_ENDIAN)
2037 tmpbuf += padding;
2039 byte_size -= padding;
2041 if (bitlen % BITS_PER_UNIT != 0)
2043 if (BYTES_BIG_ENDIAN)
2044 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
2045 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
2046 else
2047 clear_bit_region (tmpbuf, bitlen,
2048 byte_size * BITS_PER_UNIT - bitlen);
2050 /* Left shifting relies on the last byte being clear if bitlen is
2051 a multiple of BITS_PER_UNIT, which might not be clear if
2052 there are padding bytes. */
2053 else if (!BYTES_BIG_ENDIAN)
2054 tmpbuf[byte_size - 1] = '\0';
2056 /* Clear the bit region in PTR where the bits from TMPBUF will be
2057 inserted into. */
2058 if (BYTES_BIG_ENDIAN)
2059 clear_bit_region_be (ptr + first_byte,
2060 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
2061 else
2062 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
2064 int shift_amnt;
2065 int bitlen_mod = bitlen % BITS_PER_UNIT;
2066 int bitpos_mod = bitpos % BITS_PER_UNIT;
2068 bool skip_byte = false;
2069 if (BYTES_BIG_ENDIAN)
2071 /* BITPOS and BITLEN are exactly aligned and no shifting
2072 is necessary. */
2073 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
2074 || (bitpos_mod == 0 && bitlen_mod == 0))
2075 shift_amnt = 0;
2076 /* |. . . . . . . .|
2077 <bp > <blen >.
2078 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
2079 of the value until it aligns with 'bp' in the next byte over. */
2080 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
2082 shift_amnt = bitlen_mod + bitpos_mod;
2083 skip_byte = bitlen_mod != 0;
2085 /* |. . . . . . . .|
2086 <----bp--->
2087 <---blen---->.
2088 Shift the value right within the same byte so it aligns with 'bp'. */
2089 else
2090 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
2092 else
2093 shift_amnt = bitpos % BITS_PER_UNIT;
2095 /* Create the shifted version of EXPR. */
2096 if (!BYTES_BIG_ENDIAN)
2098 shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
2099 if (shift_amnt == 0)
2100 byte_size--;
2102 else
2104 gcc_assert (BYTES_BIG_ENDIAN);
2105 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
2106 /* If shifting right forced us to move into the next byte, skip the now
2107 empty byte. */
2108 if (skip_byte)
2110 tmpbuf++;
2111 byte_size--;
2115 /* Insert the bits from TMPBUF. */
2116 for (unsigned int i = 0; i < byte_size; i++)
2117 ptr[first_byte + i] |= tmpbuf[i];
2119 return true;
2122 /* Sorting function for store_immediate_info objects.
2123 Sorts them by bitposition. */
2125 static int
2126 sort_by_bitpos (const void *x, const void *y)
2128 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
2129 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
2131 if ((*tmp)->bitpos < (*tmp2)->bitpos)
2132 return -1;
2133 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
2134 return 1;
2135 else
2136 /* If they are the same let's use the order which is guaranteed to
2137 be different. */
2138 return (*tmp)->order - (*tmp2)->order;
2141 /* Sorting function for store_immediate_info objects.
2142 Sorts them by the order field. */
2144 static int
2145 sort_by_order (const void *x, const void *y)
2147 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
2148 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
2150 if ((*tmp)->order < (*tmp2)->order)
2151 return -1;
2152 else if ((*tmp)->order > (*tmp2)->order)
2153 return 1;
2155 gcc_unreachable ();
2158 /* Initialize a merged_store_group object from a store_immediate_info
2159 object. */
2161 merged_store_group::merged_store_group (store_immediate_info *info)
2163 start = info->bitpos;
2164 width = info->bitsize;
2165 bitregion_start = info->bitregion_start;
2166 bitregion_end = info->bitregion_end;
2167 /* VAL has memory allocated for it in apply_stores once the group
2168 width has been finalized. */
2169 val = NULL;
2170 mask = NULL;
2171 bit_insertion = info->rhs_code == BIT_INSERT_EXPR;
2172 string_concatenation = info->rhs_code == STRING_CST;
2173 only_constants = info->rhs_code == INTEGER_CST;
2174 consecutive = true;
2175 first_nonmergeable_order = ~0U;
2176 lp_nr = info->lp_nr;
2177 unsigned HOST_WIDE_INT align_bitpos = 0;
2178 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2179 &align, &align_bitpos);
2180 align_base = start - align_bitpos;
2181 for (int i = 0; i < 2; ++i)
2183 store_operand_info &op = info->ops[i];
2184 if (op.base_addr == NULL_TREE)
2186 load_align[i] = 0;
2187 load_align_base[i] = 0;
2189 else
2191 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
2192 load_align_base[i] = op.bitpos - align_bitpos;
2195 stores.create (1);
2196 stores.safe_push (info);
2197 last_stmt = info->stmt;
2198 last_order = info->order;
2199 first_stmt = last_stmt;
2200 first_order = last_order;
2201 buf_size = 0;
2204 merged_store_group::~merged_store_group ()
2206 if (val)
2207 XDELETEVEC (val);
2210 /* Return true if the store described by INFO can be merged into the group. */
2212 bool
2213 merged_store_group::can_be_merged_into (store_immediate_info *info)
2215 /* Do not merge bswap patterns. */
2216 if (info->rhs_code == LROTATE_EXPR)
2217 return false;
2219 if (info->lp_nr != lp_nr)
2220 return false;
2222 /* The canonical case. */
2223 if (info->rhs_code == stores[0]->rhs_code)
2224 return true;
2226 /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST. */
2227 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
2228 return !string_concatenation;
2230 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
2231 return !string_concatenation;
2233 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
2234 only for small regions since this can generate a lot of instructions. */
2235 if (info->rhs_code == MEM_REF
2236 && (stores[0]->rhs_code == INTEGER_CST
2237 || stores[0]->rhs_code == BIT_INSERT_EXPR)
2238 && info->bitregion_start == stores[0]->bitregion_start
2239 && info->bitregion_end == stores[0]->bitregion_end
2240 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
2241 return !string_concatenation;
2243 if (stores[0]->rhs_code == MEM_REF
2244 && (info->rhs_code == INTEGER_CST
2245 || info->rhs_code == BIT_INSERT_EXPR)
2246 && info->bitregion_start == stores[0]->bitregion_start
2247 && info->bitregion_end == stores[0]->bitregion_end
2248 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
2249 return !string_concatenation;
2251 /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR. */
2252 if (info->rhs_code == STRING_CST
2253 && stores[0]->rhs_code == INTEGER_CST
2254 && stores[0]->bitsize == CHAR_BIT)
2255 return !bit_insertion;
2257 if (stores[0]->rhs_code == STRING_CST
2258 && info->rhs_code == INTEGER_CST
2259 && info->bitsize == CHAR_BIT)
2260 return !bit_insertion;
2262 return false;
2265 /* Helper method for merge_into and merge_overlapping to do
2266 the common part. */
2268 void
2269 merged_store_group::do_merge (store_immediate_info *info)
2271 bitregion_start = MIN (bitregion_start, info->bitregion_start);
2272 bitregion_end = MAX (bitregion_end, info->bitregion_end);
2274 unsigned int this_align;
2275 unsigned HOST_WIDE_INT align_bitpos = 0;
2276 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2277 &this_align, &align_bitpos);
2278 if (this_align > align)
2280 align = this_align;
2281 align_base = info->bitpos - align_bitpos;
2283 for (int i = 0; i < 2; ++i)
2285 store_operand_info &op = info->ops[i];
2286 if (!op.base_addr)
2287 continue;
2289 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
2290 if (this_align > load_align[i])
2292 load_align[i] = this_align;
2293 load_align_base[i] = op.bitpos - align_bitpos;
2297 gimple *stmt = info->stmt;
2298 stores.safe_push (info);
2299 if (info->order > last_order)
2301 last_order = info->order;
2302 last_stmt = stmt;
2304 else if (info->order < first_order)
2306 first_order = info->order;
2307 first_stmt = stmt;
2310 if (info->bitpos != start + width)
2311 consecutive = false;
2313 /* We need to use extraction if there is any bit-field. */
2314 if (info->rhs_code == BIT_INSERT_EXPR)
2316 bit_insertion = true;
2317 gcc_assert (!string_concatenation);
2320 /* We want to use concatenation if there is any string. */
2321 if (info->rhs_code == STRING_CST)
2323 string_concatenation = true;
2324 gcc_assert (!bit_insertion);
2327 /* But we cannot use it if we don't have consecutive stores. */
2328 if (!consecutive)
2329 string_concatenation = false;
2331 if (info->rhs_code != INTEGER_CST)
2332 only_constants = false;
2335 /* Merge a store recorded by INFO into this merged store.
2336 The store is not overlapping with the existing recorded
2337 stores. */
2339 void
2340 merged_store_group::merge_into (store_immediate_info *info)
2342 do_merge (info);
2344 /* Make sure we're inserting in the position we think we're inserting. */
2345 gcc_assert (info->bitpos >= start + width
2346 && info->bitregion_start <= bitregion_end);
2348 width = info->bitpos + info->bitsize - start;
2351 /* Merge a store described by INFO into this merged store.
2352 INFO overlaps in some way with the current store (i.e. it's not contiguous,
2353 which is handled by merged_store_group::merge_into). */
2355 void
2356 merged_store_group::merge_overlapping (store_immediate_info *info)
2358 do_merge (info);
2360 /* If the store extends the size of the group, extend the width. */
2361 if (info->bitpos + info->bitsize > start + width)
2362 width = info->bitpos + info->bitsize - start;
2365 /* Go through all the recorded stores in this group in program order and
2366 apply their values to the VAL byte array to create the final merged
2367 value. Return true if the operation succeeded. */
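/* Note that VAL accumulates the bytes actually written by the stores, while
   MASK starts out all-ones and has the written bits cleared, so any bit that
   is still set in MASK after this routine was not covered by any store in
   the group.  */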
2369 bool
2370 merged_store_group::apply_stores ()
2372 store_immediate_info *info;
2373 unsigned int i;
2375 /* Make sure we have more than one store in the group, otherwise we cannot
2376 merge anything. */
2377 if (bitregion_start % BITS_PER_UNIT != 0
2378 || bitregion_end % BITS_PER_UNIT != 0
2379 || stores.length () == 1)
2380 return false;
2382 buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT;
2384 /* Really do string concatenation for large strings only. */
2385 if (buf_size <= MOVE_MAX)
2386 string_concatenation = false;
2388 /* String concatenation only works for byte aligned start and end. */
2389 if (start % BITS_PER_UNIT != 0 || width % BITS_PER_UNIT != 0)
2390 string_concatenation = false;
2392 /* Create a power-of-2-sized buffer for native_encode_expr. */
2393 if (!string_concatenation)
2394 buf_size = 1 << ceil_log2 (buf_size);
2396 val = XNEWVEC (unsigned char, 2 * buf_size);
2397 mask = val + buf_size;
2398 memset (val, 0, buf_size);
2399 memset (mask, ~0U, buf_size);
2401 stores.qsort (sort_by_order);
2403 FOR_EACH_VEC_ELT (stores, i, info)
2405 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
2406 tree cst;
2407 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2408 cst = info->ops[0].val;
2409 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2410 cst = info->ops[1].val;
2411 else
2412 cst = NULL_TREE;
2413 bool ret = true;
2414 if (cst && info->rhs_code != BIT_INSERT_EXPR)
2415 ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
2416 buf_size);
2417 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2418 if (BYTES_BIG_ENDIAN)
2419 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2420 - (pos_in_buffer % BITS_PER_UNIT)),
2421 info->bitsize);
2422 else
2423 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
2424 if (cst && dump_file && (dump_flags & TDF_DETAILS))
2426 if (ret)
2428 fputs ("After writing ", dump_file);
2429 print_generic_expr (dump_file, cst, TDF_NONE);
2430 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
2431 " at position %d\n", info->bitsize, pos_in_buffer);
2432 fputs (" the merged value contains ", dump_file);
2433 dump_char_array (dump_file, val, buf_size);
2434 fputs (" the merged mask contains ", dump_file);
2435 dump_char_array (dump_file, mask, buf_size);
2436 if (bit_insertion)
2437 fputs (" bit insertion is required\n", dump_file);
2438 if (string_concatenation)
2439 fputs (" string concatenation is required\n", dump_file);
2441 else
2442 fprintf (dump_file, "Failed to merge stores\n");
2444 if (!ret)
2445 return false;
2447 stores.qsort (sort_by_bitpos);
2448 return true;
2451 /* Structure describing the store chain. */
2453 class imm_store_chain_info
2455 public:
2456 /* Doubly-linked list that imposes an order on chain processing.
2457 PNXP (prev's next pointer) points to the head of a list, or to
2458 the next field in the previous chain in the list.
2459 See pass_store_merging::m_stores_head for more rationale. */
2460 imm_store_chain_info *next, **pnxp;
2461 tree base_addr;
2462 auto_vec<store_immediate_info *> m_store_info;
2463 auto_vec<merged_store_group *> m_merged_store_groups;
2465 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2466 : next (inspt), pnxp (&inspt), base_addr (b_a)
2468 inspt = this;
2469 if (next)
2471 gcc_checking_assert (pnxp == next->pnxp);
2472 next->pnxp = &next;
2475 ~imm_store_chain_info ()
2477 *pnxp = next;
2478 if (next)
2480 gcc_checking_assert (&next == next->pnxp);
2481 next->pnxp = pnxp;
2484 bool terminate_and_process_chain ();
2485 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int,
2486 unsigned int);
2487 bool coalesce_immediate_stores ();
2488 bool output_merged_store (merged_store_group *);
2489 bool output_merged_stores ();
2492 const pass_data pass_data_tree_store_merging = {
2493 GIMPLE_PASS, /* type */
2494 "store-merging", /* name */
2495 OPTGROUP_NONE, /* optinfo_flags */
2496 TV_GIMPLE_STORE_MERGING, /* tv_id */
2497 PROP_ssa, /* properties_required */
2498 0, /* properties_provided */
2499 0, /* properties_destroyed */
2500 0, /* todo_flags_start */
2501 TODO_update_ssa, /* todo_flags_finish */
2504 class pass_store_merging : public gimple_opt_pass
2506 public:
2507 pass_store_merging (gcc::context *ctxt)
2508 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head (),
2509 m_n_chains (0), m_n_stores (0)
2513 /* Pass not supported for PDP-endian, nor for insane hosts or
2514 target character sizes where native_{encode,interpret}_expr
2515 doesn't work properly. */
2516 bool
2517 gate (function *) final override
2519 return flag_store_merging
2520 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
2521 && CHAR_BIT == 8
2522 && BITS_PER_UNIT == 8;
2525 unsigned int execute (function *) final override;
2527 private:
2528 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
2530 /* Form a doubly-linked stack of the elements of m_stores, so that
2531 we can iterate over them in a predictable way. Using this order
2532 avoids extraneous differences in the compiler output just because
2533 of tree pointer variations (e.g. different chains end up in
2534 different positions of m_stores, so they are handled in different
2535 orders, so they allocate or release SSA names in different
2536 orders, and when they get reused, subsequent passes end up
2537 getting different SSA names, which may ultimately change
2538 decisions when going out of SSA). */
2539 imm_store_chain_info *m_stores_head;
2541 /* The number of store chains currently tracked. */
2542 unsigned m_n_chains;
2543 /* The number of stores currently tracked. */
2544 unsigned m_n_stores;
2546 bool process_store (gimple *);
2547 bool terminate_and_process_chain (imm_store_chain_info *);
2548 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
2549 bool terminate_and_process_all_chains ();
2550 }; // class pass_store_merging
2552 /* Terminate and process all recorded chains. Return true if any changes
2553 were made. */
2555 bool
2556 pass_store_merging::terminate_and_process_all_chains ()
2558 bool ret = false;
2559 while (m_stores_head)
2560 ret |= terminate_and_process_chain (m_stores_head);
2561 gcc_assert (m_stores.is_empty ());
2562 return ret;
2565 /* Terminate all chains that are affected by the statement STMT.
2566 CHAIN_INFO is the chain we should ignore from the checks if
2567 non-NULL. Return true if any changes were made. */
2569 bool
2570 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
2571 **chain_info,
2572 gimple *stmt)
2574 bool ret = false;
2576 /* If the statement doesn't touch memory it can't alias. */
2577 if (!gimple_vuse (stmt))
2578 return false;
2580 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
2581 ao_ref store_lhs_ref;
2582 ao_ref_init (&store_lhs_ref, store_lhs);
2583 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
2585 next = cur->next;
2587 /* We already checked all the stores in chain_info and terminated the
2588 chain if necessary. Skip it here. */
2589 if (chain_info && *chain_info == cur)
2590 continue;
2592 store_immediate_info *info;
2593 unsigned int i;
2594 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
2596 tree lhs = gimple_assign_lhs (info->stmt);
2597 ao_ref lhs_ref;
2598 ao_ref_init (&lhs_ref, lhs);
2599 if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
2600 || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
2601 || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
2602 &lhs_ref, false)))
2604 if (dump_file && (dump_flags & TDF_DETAILS))
2606 fprintf (dump_file, "stmt causes chain termination:\n");
2607 print_gimple_stmt (dump_file, stmt, 0);
2609 ret |= terminate_and_process_chain (cur);
2610 break;
2615 return ret;
2618 /* Helper function. Terminate the recorded chain storing to base object
2619 BASE. Return true if the merging and output was successful. The m_stores
2620 entry is removed after the processing in any case. */
2622 bool
2623 pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
2625 m_n_stores -= chain_info->m_store_info.length ();
2626 m_n_chains--;
2627 bool ret = chain_info->terminate_and_process_chain ();
2628 m_stores.remove (chain_info->base_addr);
2629 delete chain_info;
2630 return ret;
2633 /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
2634 may clobber REF. FIRST and LAST must have non-NULL vdef. We want to
2635 be able to sink a load of REF across stores between FIRST and LAST, up
2636 to right before LAST. */
2638 bool
2639 stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2641 ao_ref r;
2642 ao_ref_init (&r, ref);
2643 unsigned int count = 0;
2644 tree vop = gimple_vdef (last);
2645 gimple *stmt;
2647 /* Return true conservatively if the basic blocks are different. */
2648 if (gimple_bb (first) != gimple_bb (last))
2649 return true;
2653 stmt = SSA_NAME_DEF_STMT (vop);
2654 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2655 return true;
2656 if (gimple_store_p (stmt)
2657 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2658 return true;
2659 /* Avoid quadratic compile time by bounding the number of checks
2660 we perform. */
2661 if (++count > MAX_STORE_ALIAS_CHECKS)
2662 return true;
2663 vop = gimple_vuse (stmt);
2665 while (stmt != first);
2667 return false;
2670 /* Return true if INFO->ops[IDX] is mergeable with the
2671 corresponding loads already in MERGED_STORE group.
2672 BASE_ADDR is the base address of the whole store group. */
2674 bool
2675 compatible_load_p (merged_store_group *merged_store,
2676 store_immediate_info *info,
2677 tree base_addr, int idx)
2679 store_immediate_info *infof = merged_store->stores[0];
2680 if (!info->ops[idx].base_addr
2681 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2682 info->bitpos - infof->bitpos)
2683 || !operand_equal_p (info->ops[idx].base_addr,
2684 infof->ops[idx].base_addr, 0))
2685 return false;
2687 store_immediate_info *infol = merged_store->stores.last ();
2688 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2689 /* In this case all vuses should be the same, e.g.
2690 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2692 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2693 and we can emit the coalesced load next to any of those loads. */
2694 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2695 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2696 return true;
2698 /* Otherwise, at least for now require that the load has the same
2699 vuse as the store. See following examples. */
2700 if (gimple_vuse (info->stmt) != load_vuse)
2701 return false;
2703 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2704 || (infof != infol
2705 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2706 return false;
2708 /* If the load is from the same location as the store, already
2709 the construction of the immediate chain info guarantees no intervening
2710 stores, so no further checks are needed. Example:
2711 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
2712 if (known_eq (info->ops[idx].bitpos, info->bitpos)
2713 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2714 return true;
2716 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2717 of the stores in the group, or any other stores in between those.
2718 Previous calls to compatible_load_p ensured that for all the
2719 merged_store->stores IDX loads, no stmts starting with
2720 merged_store->first_stmt and ending right before merged_store->last_stmt
2721 clobbers those loads. */
2722 gimple *first = merged_store->first_stmt;
2723 gimple *last = merged_store->last_stmt;
2724 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2725 comes before the so far first load, we'll be changing
2726 merged_store->first_stmt. In that case we need to give up if
2727 any of the earlier processed loads can be clobbered by the stmts in the
2728 new range.  */
2729 if (info->order < merged_store->first_order)
2731 for (store_immediate_info *infoc : merged_store->stores)
2732 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2733 return false;
2734 first = info->stmt;
2736 /* Similarly, we could change merged_store->last_stmt, so ensure
2737 in that case no stmts in the new range clobber any of the earlier
2738 processed loads. */
2739 else if (info->order > merged_store->last_order)
2741 for (store_immediate_info *infoc : merged_store->stores)
2742 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2743 return false;
2744 last = info->stmt;
2746 /* And finally, we'd be adding a new load to the set, ensure it isn't
2747 clobbered in the new range. */
2748 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2749 return false;
2751 /* Otherwise, we are looking for:
2752 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2754 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2755 return true;
2758 /* Add all refs loaded to compute VAL to REFS vector. */
2760 void
2761 gather_bswap_load_refs (vec<tree> *refs, tree val)
2763 if (TREE_CODE (val) != SSA_NAME)
2764 return;
2766 gimple *stmt = SSA_NAME_DEF_STMT (val);
2767 if (!is_gimple_assign (stmt))
2768 return;
2770 if (gimple_assign_load_p (stmt))
2772 refs->safe_push (gimple_assign_rhs1 (stmt));
2773 return;
2776 switch (gimple_assign_rhs_class (stmt))
2778 case GIMPLE_BINARY_RHS:
2779 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2780 /* FALLTHRU */
2781 case GIMPLE_UNARY_RHS:
2782 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2783 break;
2784 default:
2785 gcc_unreachable ();
2789 /* Check if there are any stores in M_STORE_INFO after index I
2790 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2791 a potential group ending at END and have their order
2792 smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if
2793 all the stores already merged and the one under consideration
2794 have rhs_code of INTEGER_CST. Return true if there are no such stores.
2795 Consider:
2796 MEM[(long long int *)p_28] = 0;
2797 MEM[(long long int *)p_28 + 8B] = 0;
2798 MEM[(long long int *)p_28 + 16B] = 0;
2799 MEM[(long long int *)p_28 + 24B] = 0;
2800 _129 = (int) _130;
2801 MEM[(int *)p_28 + 8B] = _129;
2802 MEM[(int *)p_28].a = -1;
2803 We already have
2804 MEM[(long long int *)p_28] = 0;
2805 MEM[(int *)p_28].a = -1;
2806 stmts in the current group and need to consider if it is safe to
2807 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2808 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2809 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2810 into the group and merging of those 3 stores is successful, merged
2811 stmts will be emitted at the latest store from that group, i.e.
2812 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2813 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2814 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2815 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2816 into the group. That way it will be its own store group and will
2817 not be touched. If ALL_INTEGER_CST_P and there are overlapping
2818 INTEGER_CST stores, those are mergeable using merge_overlapping,
2819 so don't return false for those.
2821 Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER
2822 (exclusive), verifying that none of them overlaps the bitrange START to
2823 END while having an order in between FIRST_ORDER and LAST_ORDER.  This is to
2824 prevent merging in cases like:
2825 MEM <char[12]> [&b + 8B] = {};
2826 MEM[(short *) &b] = 5;
2827 _5 = *x_4(D);
2828 MEM <long long unsigned int> [&b + 2B] = _5;
2829 MEM[(char *)&b + 16B] = 88;
2830 MEM[(int *)&b + 20B] = 1;
2831 The = {} store comes in sort_by_bitpos before the = 88 store, and can't
2832 be merged with it, because the = _5 store overlaps these and is in between
2833 them in sort_by_order ordering. If it was merged, the merged store would
2834 go after the = _5 store and thus change behavior. */
2836 static bool
2837 check_no_overlap (const vec<store_immediate_info *> &m_store_info,
2838 unsigned int i,
2839 bool all_integer_cst_p, unsigned int first_order,
2840 unsigned int last_order, unsigned HOST_WIDE_INT start,
2841 unsigned HOST_WIDE_INT end, unsigned int first_earlier,
2842 unsigned end_earlier)
2844 unsigned int len = m_store_info.length ();
2845 for (unsigned int j = first_earlier; j < end_earlier; j++)
2847 store_immediate_info *info = m_store_info[j];
2848 if (info->order > first_order
2849 && info->order < last_order
2850 && info->bitpos + info->bitsize > start)
2851 return false;
2853 for (++i; i < len; ++i)
2855 store_immediate_info *info = m_store_info[i];
2856 if (info->bitpos >= end)
2857 break;
2858 if (info->order < last_order
2859 && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
2860 return false;
2862 return true;
2865 /* Return true if m_store_info[first] and at least one following store
2866 form a group that stores a try_size-bit value which is byte swapped
2867 from a memory load or from some computed value, or is that value unchanged.
2868 This uses the bswap pass APIs. */
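/* As a rough illustration, on a little-endian target the sequence
     p[0] = x >> 24;  p[1] = x >> 16;  p[2] = x >> 8;  p[3] = x;
   stores a byte-swapped copy of X and can be replaced by a single 4-byte
   store of __builtin_bswap32 (x), provided the target has a bswap optab
   for SImode (see the try_size == 32 case below).  */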
2870 bool
2871 imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2872 unsigned int first,
2873 unsigned int try_size,
2874 unsigned int first_earlier)
2876 unsigned int len = m_store_info.length (), last = first;
2877 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2878 if (width >= try_size)
2879 return false;
2880 for (unsigned int i = first + 1; i < len; ++i)
2882 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
2883 || m_store_info[i]->lp_nr != merged_store->lp_nr
2884 || m_store_info[i]->ins_stmt == NULL)
2885 return false;
2886 width += m_store_info[i]->bitsize;
2887 if (width >= try_size)
2889 last = i;
2890 break;
2893 if (width != try_size)
2894 return false;
2896 bool allow_unaligned
2897 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
2898 /* Punt if the combined store would not be aligned and we need alignment. */
2899 if (!allow_unaligned)
2901 unsigned int align = merged_store->align;
2902 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2903 for (unsigned int i = first + 1; i <= last; ++i)
2905 unsigned int this_align;
2906 unsigned HOST_WIDE_INT align_bitpos = 0;
2907 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2908 &this_align, &align_bitpos);
2909 if (this_align > align)
2911 align = this_align;
2912 align_base = m_store_info[i]->bitpos - align_bitpos;
2915 unsigned HOST_WIDE_INT align_bitpos
2916 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2917 if (align_bitpos)
2918 align = least_bit_hwi (align_bitpos);
2919 if (align < try_size)
2920 return false;
2923 tree type;
2924 switch (try_size)
2926 case 16: type = uint16_type_node; break;
2927 case 32: type = uint32_type_node; break;
2928 case 64: type = uint64_type_node; break;
2929 default: gcc_unreachable ();
2931 struct symbolic_number n;
2932 gimple *ins_stmt = NULL;
2933 int vuse_store = -1;
2934 unsigned int first_order = merged_store->first_order;
2935 unsigned int last_order = merged_store->last_order;
2936 gimple *first_stmt = merged_store->first_stmt;
2937 gimple *last_stmt = merged_store->last_stmt;
2938 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
2939 store_immediate_info *infof = m_store_info[first];
2941 for (unsigned int i = first; i <= last; ++i)
2943 store_immediate_info *info = m_store_info[i];
2944 struct symbolic_number this_n = info->n;
2945 this_n.type = type;
2946 if (!this_n.base_addr)
2947 this_n.range = try_size / BITS_PER_UNIT;
2948 else
2949 /* Update vuse in case it has changed by output_merged_stores. */
2950 this_n.vuse = gimple_vuse (info->ins_stmt);
2951 unsigned int bitpos = info->bitpos - infof->bitpos;
2952 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2953 BYTES_BIG_ENDIAN
2954 ? try_size - info->bitsize - bitpos
2955 : bitpos))
2956 return false;
2957 if (this_n.base_addr && vuse_store)
2959 unsigned int j;
2960 for (j = first; j <= last; ++j)
2961 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2962 break;
2963 if (j > last)
2965 if (vuse_store == 1)
2966 return false;
2967 vuse_store = 0;
2970 if (i == first)
2972 n = this_n;
2973 ins_stmt = info->ins_stmt;
2975 else
2977 if (n.base_addr && n.vuse != this_n.vuse)
2979 if (vuse_store == 0)
2980 return false;
2981 vuse_store = 1;
2983 if (info->order > last_order)
2985 last_order = info->order;
2986 last_stmt = info->stmt;
2988 else if (info->order < first_order)
2990 first_order = info->order;
2991 first_stmt = info->stmt;
2993 end = MAX (end, info->bitpos + info->bitsize);
2995 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2996 &this_n, &n, BIT_IOR_EXPR);
2997 if (ins_stmt == NULL)
2998 return false;
3002 uint64_t cmpxchg, cmpnop;
3003 bool cast64_to_32;
3004 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop, &cast64_to_32);
3006   /* A complete byte swap should make the symbolic number start with
3007      the largest digit in the highest order byte.  An unchanged symbolic
3008      number indicates a read with the same endianness as the target.  */
3009 if (n.n != cmpnop && n.n != cmpxchg)
3010 return false;
3012 /* For now. */
3013 if (cast64_to_32)
3014 return false;
3016 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
3017 return false;
3019 if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
3020 merged_store->start, end, first_earlier, first))
3021 return false;
3023 /* Don't handle memory copy this way if normal non-bswap processing
3024 would handle it too. */
3025 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
3027 unsigned int i;
3028 for (i = first; i <= last; ++i)
3029 if (m_store_info[i]->rhs_code != MEM_REF)
3030 break;
3031 if (i == last + 1)
3032 return false;
3035 if (n.n == cmpxchg)
3036 switch (try_size)
3038 case 16:
3039 /* Will emit LROTATE_EXPR. */
3040 break;
3041 case 32:
3042 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
3043 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
3044 break;
3045 return false;
3046 case 64:
3047 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
3048 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
3049 break;
3050 return false;
3051 default:
3052 gcc_unreachable ();
3055 if (!allow_unaligned && n.base_addr)
3057 unsigned int align = get_object_alignment (n.src);
3058 if (align < try_size)
3059 return false;
3062   /* If each load has the vuse of the corresponding store, we need to verify
3063 the loads can be sunk right before the last store. */
3064 if (vuse_store == 1)
3066 auto_vec<tree, 64> refs;
3067 for (unsigned int i = first; i <= last; ++i)
3068 gather_bswap_load_refs (&refs,
3069 gimple_assign_rhs1 (m_store_info[i]->stmt));
3071 for (tree ref : refs)
3072 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
3073 return false;
3074 n.vuse = NULL_TREE;
3077 infof->n = n;
3078 infof->ins_stmt = ins_stmt;
3079 for (unsigned int i = first; i <= last; ++i)
3081 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
3082 m_store_info[i]->ops[0].base_addr = NULL_TREE;
3083 m_store_info[i]->ops[1].base_addr = NULL_TREE;
3084 if (i != first)
3085 merged_store->merge_into (m_store_info[i]);
3088 return true;
3091 /* Go through the candidate stores recorded in m_store_info and merge them
3092 into merged_store_group objects recorded into m_merged_store_groups
3093 representing the widened stores. Return true if coalescing was successful
3094 and the number of widened stores is fewer than the original number
3095 of stores. */
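/* In outline, the loop below first tries to recognize a bswap/nop pattern
   via try_coalesce_bswap, then merges overlapping INTEGER_CST stores with
   merge_overlapping, then merges consecutive compatible stores with
   merge_into, and otherwise closes the current group (via apply_stores)
   and starts a new one.  */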
3097 bool
3098 imm_store_chain_info::coalesce_immediate_stores ()
3100 /* Anything less can't be processed. */
3101 if (m_store_info.length () < 2)
3102 return false;
3104 if (dump_file && (dump_flags & TDF_DETAILS))
3105 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
3106 m_store_info.length ());
3108 store_immediate_info *info;
3109 unsigned int i, ignore = 0;
3110 unsigned int first_earlier = 0;
3111 unsigned int end_earlier = 0;
3113 /* Order the stores by the bitposition they write to. */
3114 m_store_info.qsort (sort_by_bitpos);
3116 info = m_store_info[0];
3117 merged_store_group *merged_store = new merged_store_group (info);
3118 if (dump_file && (dump_flags & TDF_DETAILS))
3119 fputs ("New store group\n", dump_file);
3121 FOR_EACH_VEC_ELT (m_store_info, i, info)
3123 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
3125 if (i <= ignore)
3126 goto done;
3128 while (first_earlier < end_earlier
3129 && (m_store_info[first_earlier]->bitpos
3130 + m_store_info[first_earlier]->bitsize
3131 <= merged_store->start))
3132 first_earlier++;
3134 /* First try to handle group of stores like:
3135 p[0] = data >> 24;
3136 p[1] = data >> 16;
3137 p[2] = data >> 8;
3138 p[3] = data;
3139 using the bswap framework. */
3140 if (info->bitpos == merged_store->start + merged_store->width
3141 && merged_store->stores.length () == 1
3142 && merged_store->stores[0]->ins_stmt != NULL
3143 && info->lp_nr == merged_store->lp_nr
3144 && info->ins_stmt != NULL)
3146 unsigned int try_size;
3147 for (try_size = 64; try_size >= 16; try_size >>= 1)
3148 if (try_coalesce_bswap (merged_store, i - 1, try_size,
3149 first_earlier))
3150 break;
3152 if (try_size >= 16)
3154 ignore = i + merged_store->stores.length () - 1;
3155 m_merged_store_groups.safe_push (merged_store);
3156 if (ignore < m_store_info.length ())
3158 merged_store = new merged_store_group (m_store_info[ignore]);
3159 end_earlier = ignore;
3161 else
3162 merged_store = NULL;
3163 goto done;
3167 new_bitregion_start
3168 = MIN (merged_store->bitregion_start, info->bitregion_start);
3169 new_bitregion_end
3170 = MAX (merged_store->bitregion_end, info->bitregion_end);
3172 if (info->order >= merged_store->first_nonmergeable_order
3173 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
3174 > (unsigned) param_store_merging_max_size))
3177 /* |---store 1---|
3178 |---store 2---|
3179 Overlapping stores. */
3180 else if (IN_RANGE (info->bitpos, merged_store->start,
3181 merged_store->start + merged_store->width - 1)
3182 /* |---store 1---||---store 2---|
3183      Also handle the consecutive INTEGER_CST stores case here,
3184      as we have the code to deal with overlaps here.  */
3185 || (info->bitregion_start <= merged_store->bitregion_end
3186 && info->rhs_code == INTEGER_CST
3187 && merged_store->only_constants
3188 && merged_store->can_be_merged_into (info)))
3190 /* Only allow overlapping stores of constants. */
3191 if (info->rhs_code == INTEGER_CST
3192 && merged_store->only_constants
3193 && info->lp_nr == merged_store->lp_nr)
3195 unsigned int first_order
3196 = MIN (merged_store->first_order, info->order);
3197 unsigned int last_order
3198 = MAX (merged_store->last_order, info->order);
3199 unsigned HOST_WIDE_INT end
3200 = MAX (merged_store->start + merged_store->width,
3201 info->bitpos + info->bitsize);
3202 if (check_no_overlap (m_store_info, i, true, first_order,
3203 last_order, merged_store->start, end,
3204 first_earlier, end_earlier))
3206 /* check_no_overlap call above made sure there are no
3207 overlapping stores with non-INTEGER_CST rhs_code
3208 in between the first and last of the stores we've
3209 just merged. If there are any INTEGER_CST rhs_code
3210 stores in between, we need to merge_overlapping them
3211 even if in the sort_by_bitpos order there are other
3212 overlapping stores in between. Keep those stores as is.
3213 Example:
3214 MEM[(int *)p_28] = 0;
3215 MEM[(char *)p_28 + 3B] = 1;
3216 MEM[(char *)p_28 + 1B] = 2;
3217 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
3218 We can't merge the zero store with the store of two and
3219 not merge anything else, because the store of one is
3220 in the original order in between those two, but in
3221      sort_by_bitpos order it comes after the last store that
3222 we can't merge with them. We can merge the first 3 stores
3223 and keep the last store as is though. */
3224 unsigned int len = m_store_info.length ();
3225 unsigned int try_order = last_order;
3226 unsigned int first_nonmergeable_order;
3227 unsigned int k;
3228 bool last_iter = false;
3229 int attempts = 0;
3232 unsigned int max_order = 0;
3233 unsigned int min_order = first_order;
3234 unsigned first_nonmergeable_int_order = ~0U;
3235 unsigned HOST_WIDE_INT this_end = end;
3236 k = i;
3237 first_nonmergeable_order = ~0U;
3238 for (unsigned int j = i + 1; j < len; ++j)
3240 store_immediate_info *info2 = m_store_info[j];
3241 if (info2->bitpos >= this_end)
3242 break;
3243 if (info2->order < try_order)
3245 if (info2->rhs_code != INTEGER_CST
3246 || info2->lp_nr != merged_store->lp_nr)
3248 /* Normally check_no_overlap makes sure this
3249 doesn't happen, but if end grows below,
3250 then we need to process more stores than
3251 check_no_overlap verified. Example:
3252 MEM[(int *)p_5] = 0;
3253 MEM[(short *)p_5 + 3B] = 1;
3254 MEM[(char *)p_5 + 4B] = _9;
3255 MEM[(char *)p_5 + 2B] = 2; */
3256 k = 0;
3257 break;
3259 k = j;
3260 min_order = MIN (min_order, info2->order);
3261 this_end = MAX (this_end,
3262 info2->bitpos + info2->bitsize);
3264 else if (info2->rhs_code == INTEGER_CST
3265 && info2->lp_nr == merged_store->lp_nr
3266 && !last_iter)
3268 max_order = MAX (max_order, info2->order + 1);
3269 first_nonmergeable_int_order
3270 = MIN (first_nonmergeable_int_order,
3271 info2->order);
3273 else
3274 first_nonmergeable_order
3275 = MIN (first_nonmergeable_order, info2->order);
3277 if (k > i
3278 && !check_no_overlap (m_store_info, len - 1, true,
3279 min_order, try_order,
3280 merged_store->start, this_end,
3281 first_earlier, end_earlier))
3282 k = 0;
3283 if (k == 0)
3285 if (last_order == try_order)
3286 break;
3287 /* If this failed, but only because we grew
3288 try_order, retry with the last working one,
3289 so that we merge at least something. */
3290 try_order = last_order;
3291 last_iter = true;
3292 continue;
3294 last_order = try_order;
3295 /* Retry with a larger try_order to see if we could
3296 merge some further INTEGER_CST stores. */
3297 if (max_order
3298 && (first_nonmergeable_int_order
3299 < first_nonmergeable_order))
3301 try_order = MIN (max_order,
3302 first_nonmergeable_order);
3303 try_order
3304 = MIN (try_order,
3305 merged_store->first_nonmergeable_order);
3306 if (try_order > last_order && ++attempts < 16)
3307 continue;
3309 first_nonmergeable_order
3310 = MIN (first_nonmergeable_order,
3311 first_nonmergeable_int_order);
3312 end = this_end;
3313 break;
3315 while (1);
3317 if (k != 0)
3319 merged_store->merge_overlapping (info);
3321 merged_store->first_nonmergeable_order
3322 = MIN (merged_store->first_nonmergeable_order,
3323 first_nonmergeable_order);
3325 for (unsigned int j = i + 1; j <= k; j++)
3327 store_immediate_info *info2 = m_store_info[j];
3328 gcc_assert (info2->bitpos < end);
3329 if (info2->order < last_order)
3331 gcc_assert (info2->rhs_code == INTEGER_CST);
3332 if (info != info2)
3333 merged_store->merge_overlapping (info2);
3335 /* Other stores are kept and not merged in any
3336 way. */
3338 ignore = k;
3339 goto done;
3344 /* |---store 1---||---store 2---|
3345 This store is consecutive to the previous one.
3346 Merge it into the current store group. There can be gaps in between
3347 the stores, but there can't be gaps in between bitregions. */
3348 else if (info->bitregion_start <= merged_store->bitregion_end
3349 && merged_store->can_be_merged_into (info))
3351 store_immediate_info *infof = merged_store->stores[0];
3353 /* All the rhs_code ops that take 2 operands are commutative,
3354 swap the operands if it could make the operands compatible. */
3355 if (infof->ops[0].base_addr
3356 && infof->ops[1].base_addr
3357 && info->ops[0].base_addr
3358 && info->ops[1].base_addr
3359 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
3360 info->bitpos - infof->bitpos)
3361 && operand_equal_p (info->ops[1].base_addr,
3362 infof->ops[0].base_addr, 0))
3364 std::swap (info->ops[0], info->ops[1]);
3365 info->ops_swapped_p = true;
3367 if (check_no_overlap (m_store_info, i, false,
3368 MIN (merged_store->first_order, info->order),
3369 MAX (merged_store->last_order, info->order),
3370 merged_store->start,
3371 MAX (merged_store->start + merged_store->width,
3372 info->bitpos + info->bitsize),
3373 first_earlier, end_earlier))
3375 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
3376 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
3378 info->rhs_code = BIT_INSERT_EXPR;
3379 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
3380 info->ops[0].base_addr = NULL_TREE;
3382 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
3384 for (store_immediate_info *infoj : merged_store->stores)
3386 infoj->rhs_code = BIT_INSERT_EXPR;
3387 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
3388 infoj->ops[0].base_addr = NULL_TREE;
3390 merged_store->bit_insertion = true;
3392 if ((infof->ops[0].base_addr
3393 ? compatible_load_p (merged_store, info, base_addr, 0)
3394 : !info->ops[0].base_addr)
3395 && (infof->ops[1].base_addr
3396 ? compatible_load_p (merged_store, info, base_addr, 1)
3397 : !info->ops[1].base_addr))
3399 merged_store->merge_into (info);
3400 goto done;
3405 /* |---store 1---| <gap> |---store 2---|.
3406 Gap between stores or the rhs not compatible. Start a new group. */
3408 /* Try to apply all the stores recorded for the group to determine
3409 the bitpattern they write and discard it if that fails.
3410 This will also reject single-store groups. */
3411 if (merged_store->apply_stores ())
3412 m_merged_store_groups.safe_push (merged_store);
3413 else
3414 delete merged_store;
3416 merged_store = new merged_store_group (info);
3417 end_earlier = i;
3418 if (dump_file && (dump_flags & TDF_DETAILS))
3419 fputs ("New store group\n", dump_file);
3421 done:
3422 if (dump_file && (dump_flags & TDF_DETAILS))
3424 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
3425 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
3426 i, info->bitsize, info->bitpos);
3427 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3428 fputc ('\n', dump_file);
3432 /* Record or discard the last store group. */
3433 if (merged_store)
3435 if (merged_store->apply_stores ())
3436 m_merged_store_groups.safe_push (merged_store);
3437 else
3438 delete merged_store;
3441 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
3443 bool success
3444 = !m_merged_store_groups.is_empty ()
3445 && m_merged_store_groups.length () < m_store_info.length ();
3447 if (success && dump_file)
3448 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
3449 m_merged_store_groups.length ());
3451 return success;
3454 /* Return the type to use for the merged stores or loads described by STMTS.
3455 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
3456 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
3457 of the MEM_REFs if any. */
3459 static tree
3460 get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
3461 unsigned short *cliquep, unsigned short *basep)
3463 gimple *stmt;
3464 unsigned int i;
3465 tree type = NULL_TREE;
3466 tree ret = NULL_TREE;
3467 *cliquep = 0;
3468 *basep = 0;
3470 FOR_EACH_VEC_ELT (stmts, i, stmt)
3472 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3473 : gimple_assign_lhs (stmt);
3474 tree type1 = reference_alias_ptr_type (ref);
3475 tree base = get_base_address (ref);
3477 if (i == 0)
3479 if (TREE_CODE (base) == MEM_REF)
3481 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3482 *basep = MR_DEPENDENCE_BASE (base);
3484 ret = type = type1;
3485 continue;
3487 if (!alias_ptr_types_compatible_p (type, type1))
3488 ret = ptr_type_node;
3489 if (TREE_CODE (base) != MEM_REF
3490 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3491 || *basep != MR_DEPENDENCE_BASE (base))
3493 *cliquep = 0;
3494 *basep = 0;
3497 return ret;
3500 /* Return the location_t information we can find among the statements
3501 in STMTS. */
3503 static location_t
3504 get_location_for_stmts (vec<gimple *> &stmts)
3506 for (gimple *stmt : stmts)
3507 if (gimple_has_location (stmt))
3508 return gimple_location (stmt);
3510 return UNKNOWN_LOCATION;
3513 /* Used to describe a store resulting from splitting a wide store into smaller
3514 regularly-sized stores in split_group. */
3516 class split_store
3518 public:
3519 unsigned HOST_WIDE_INT bytepos;
3520 unsigned HOST_WIDE_INT size;
3521 unsigned HOST_WIDE_INT align;
3522 auto_vec<store_immediate_info *> orig_stores;
3523 /* True if there is a single orig stmt covering the whole split store. */
3524 bool orig;
3525 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3526 unsigned HOST_WIDE_INT);
3529 /* Simple constructor. */
3531 split_store::split_store (unsigned HOST_WIDE_INT bp,
3532 unsigned HOST_WIDE_INT sz,
3533 unsigned HOST_WIDE_INT al)
3534 : bytepos (bp), size (sz), align (al), orig (false)
3536 orig_stores.create (0);
3539 /* Record all stores in GROUP that write to the region starting at BITPOS and
3540 is of size BITSIZE. Record infos for such statements in STORES if
3541 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
3542 if there is exactly one original store in the range (in that case ignore
3543 clobber stmts, unless there are only clobber stmts). */
3545 static store_immediate_info *
3546 find_constituent_stores (class merged_store_group *group,
3547 vec<store_immediate_info *> *stores,
3548 unsigned int *first,
3549 unsigned HOST_WIDE_INT bitpos,
3550 unsigned HOST_WIDE_INT bitsize)
3552 store_immediate_info *info, *ret = NULL;
3553 unsigned int i;
3554 bool second = false;
3555 bool update_first = true;
3556 unsigned HOST_WIDE_INT end = bitpos + bitsize;
3557 for (i = *first; group->stores.iterate (i, &info); ++i)
3559 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3560 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
3561 if (stmt_end <= bitpos)
3563 /* BITPOS passed to this function never decreases from within the
3564 same split_group call, so optimize and don't scan info records
3565 which are known to end before or at BITPOS next time.
3566 Only do it if all stores before this one also pass this. */
3567 if (update_first)
3568 *first = i + 1;
3569 continue;
3571 else
3572 update_first = false;
3574 /* The stores in GROUP are ordered by bitposition so if we're past
3575 the region for this group return early. */
3576 if (stmt_start >= end)
3577 return ret;
3579 if (gimple_clobber_p (info->stmt))
3581 if (stores)
3582 stores->safe_push (info);
3583 if (ret == NULL)
3584 ret = info;
3585 continue;
3587 if (stores)
3589 stores->safe_push (info);
3590 if (ret && !gimple_clobber_p (ret->stmt))
3592 ret = NULL;
3593 second = true;
3596 else if (ret && !gimple_clobber_p (ret->stmt))
3597 return NULL;
3598 if (!second)
3599 ret = info;
3601 return ret;
3604 /* Return how many SSA_NAMEs used to compute the value stored by INFO
3605    have multiple uses.  If any SSA_NAME has multiple uses, also
3606    count the statements needed to compute it.  */
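/* For instance, for a store of _3 where _3 = _1 ^ 15 and the loaded value
   _1 also has uses outside this chain, the statement computing _1 has to
   stay around even after merging; the count returned here feeds the
   old/new statement count heuristics in split_group.  */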
3608 static unsigned
3609 count_multiple_uses (store_immediate_info *info)
3611 gimple *stmt = info->stmt;
3612 unsigned ret = 0;
3613 switch (info->rhs_code)
3615 case INTEGER_CST:
3616 case STRING_CST:
3617 return 0;
3618 case BIT_AND_EXPR:
3619 case BIT_IOR_EXPR:
3620 case BIT_XOR_EXPR:
3621 if (info->bit_not_p)
3623 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3624 ret = 1; /* Fall through below to return
3625 the BIT_NOT_EXPR stmt and then
3626 BIT_{AND,IOR,XOR}_EXPR and anything it
3627 uses. */
3628 else
3629 /* stmt is after this the BIT_NOT_EXPR. */
3630 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3632 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3634 ret += 1 + info->ops[0].bit_not_p;
3635 if (info->ops[1].base_addr)
3636 ret += 1 + info->ops[1].bit_not_p;
3637 return ret + 1;
3639 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3640 /* stmt is now the BIT_*_EXPR. */
3641 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3642 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3643 else if (info->ops[info->ops_swapped_p].bit_not_p)
3645 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3646 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3647 ++ret;
3649 if (info->ops[1].base_addr == NULL_TREE)
3651 gcc_checking_assert (!info->ops_swapped_p);
3652 return ret;
3654 if (!has_single_use (gimple_assign_rhs2 (stmt)))
3655 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3656 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
3658 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3659 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3660 ++ret;
3662 return ret;
3663 case MEM_REF:
3664 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3665 return 1 + info->ops[0].bit_not_p;
3666 else if (info->ops[0].bit_not_p)
3668 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3669 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3670 return 1;
3672 return 0;
3673 case BIT_INSERT_EXPR:
3674 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
3675 default:
3676 gcc_unreachable ();
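/* A small worked example with made-up names: for a MEM_REF store
   _1 = *q_2; *p_3 = _1; where _1 also has an unrelated second use, the
   MEM_REF case above returns 1 + ops[0].bit_not_p, reflecting that the
   load statement must be kept for the other use even after merging.
   INTEGER_CST and STRING_CST stores compute nothing from SSA_NAMEs and
   return 0.  */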
3680 /* Split a merged store described by GROUP by populating the SPLIT_STORES
3681 vector (if non-NULL) with split_store structs describing the byte offset
3682 (from the base), the bit size and alignment of each store as well as the
3683 original statements involved in each such split group.
3684 This is to separate the splitting strategy from the statement
3685 building/emission/linking done in output_merged_store.
3686 Return number of new stores.
3687 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
3688 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
3689 BZERO_FIRST may be true only when the first store covers the whole group
3690 and clears it; if BZERO_FIRST is true, keep that first store in the set
3691 unmodified and emit further stores for the overrides only.
3692 If SPLIT_STORES is NULL, it is just a dry run to count number of
3693 new stores. */
3695 static unsigned int
3696 split_group (merged_store_group *group, bool allow_unaligned_store,
3697 bool allow_unaligned_load, bool bzero_first,
3698 vec<split_store *> *split_stores,
3699 unsigned *total_orig,
3700 unsigned *total_new)
3702 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3703 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
3704 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
3705 unsigned HOST_WIDE_INT group_align = group->align;
3706 unsigned HOST_WIDE_INT align_base = group->align_base;
3707 unsigned HOST_WIDE_INT group_load_align = group_align;
3708 bool any_orig = false;
3710 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3712 /* For bswap framework using sets of stores, all the checking has been done
3713 earlier in try_coalesce_bswap and the result always needs to be emitted
3714 as a single store. Likewise for string concatenation. */
3715 if (group->stores[0]->rhs_code == LROTATE_EXPR
3716 || group->stores[0]->rhs_code == NOP_EXPR
3717 || group->string_concatenation)
3719 gcc_assert (!bzero_first);
3720 if (total_orig)
3722 /* Avoid the old/new stmt count heuristics. It should always
3723 be beneficial. */
3724 total_new[0] = 1;
3725 total_orig[0] = 2;
3728 if (split_stores)
3730 unsigned HOST_WIDE_INT align_bitpos
3731 = (group->start - align_base) & (group_align - 1);
3732 unsigned HOST_WIDE_INT align = group_align;
3733 if (align_bitpos)
3734 align = least_bit_hwi (align_bitpos);
3735 bytepos = group->start / BITS_PER_UNIT;
3736 split_store *store
3737 = new split_store (bytepos, group->width, align);
3738 unsigned int first = 0;
3739 find_constituent_stores (group, &store->orig_stores,
3740 &first, group->start, group->width);
3741 split_stores->safe_push (store);
3744 return 1;
3747 unsigned int ret = 0, first = 0;
3748 unsigned HOST_WIDE_INT try_pos = bytepos;
3750 if (total_orig)
3752 unsigned int i;
3753 store_immediate_info *info = group->stores[0];
3755 total_new[0] = 0;
3756 total_orig[0] = 1; /* The orig store. */
3757 info = group->stores[0];
3758 if (info->ops[0].base_addr)
3759 total_orig[0]++;
3760 if (info->ops[1].base_addr)
3761 total_orig[0]++;
3762 switch (info->rhs_code)
3764 case BIT_AND_EXPR:
3765 case BIT_IOR_EXPR:
3766 case BIT_XOR_EXPR:
3767 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3768 break;
3769 default:
3770 break;
3772 total_orig[0] *= group->stores.length ();
3774 FOR_EACH_VEC_ELT (group->stores, i, info)
3776 total_new[0] += count_multiple_uses (info);
3777 total_orig[0] += (info->bit_not_p
3778 + info->ops[0].bit_not_p
3779 + info->ops[1].bit_not_p);
3783 if (!allow_unaligned_load)
3784 for (int i = 0; i < 2; ++i)
3785 if (group->load_align[i])
3786 group_load_align = MIN (group_load_align, group->load_align[i]);
3788 if (bzero_first)
3790 store_immediate_info *gstore;
3791 FOR_EACH_VEC_ELT (group->stores, first, gstore)
3792 if (!gimple_clobber_p (gstore->stmt))
3793 break;
3794 ++first;
3795 ret = 1;
3796 if (split_stores)
3798 split_store *store
3799 = new split_store (bytepos, gstore->bitsize, align_base);
3800 store->orig_stores.safe_push (gstore);
3801 store->orig = true;
3802 any_orig = true;
3803 split_stores->safe_push (store);
3807 while (size > 0)
3809 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3810 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3811 || (bzero_first && group->val[try_pos - bytepos] == 0)))
3813 /* Skip padding bytes. */
3814 ++try_pos;
3815 size -= BITS_PER_UNIT;
3816 continue;
3819 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
3820 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3821 unsigned HOST_WIDE_INT align_bitpos
3822 = (try_bitpos - align_base) & (group_align - 1);
3823 unsigned HOST_WIDE_INT align = group_align;
3824 bool found_orig = false;
3825 if (align_bitpos)
3826 align = least_bit_hwi (align_bitpos);
3827 if (!allow_unaligned_store)
3828 try_size = MIN (try_size, align);
3829 if (!allow_unaligned_load)
3831 /* If we can't do or don't want to do unaligned stores
3832 as well as loads, we need to take the loads into account
3833 as well. */
3834 unsigned HOST_WIDE_INT load_align = group_load_align;
3835 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3836 if (align_bitpos)
3837 load_align = least_bit_hwi (align_bitpos);
3838 for (int i = 0; i < 2; ++i)
3839 if (group->load_align[i])
3841 align_bitpos
3842 = known_alignment (try_bitpos
3843 - group->stores[0]->bitpos
3844 + group->stores[0]->ops[i].bitpos
3845 - group->load_align_base[i]);
3846 if (align_bitpos & (group_load_align - 1))
3848 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3849 load_align = MIN (load_align, a);
3852 try_size = MIN (try_size, load_align);
3854 store_immediate_info *info
3855 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
3856 if (info && !gimple_clobber_p (info->stmt))
3858 /* If there is just one original statement for the range, see if
3859 we can just reuse the original store which could be even larger
3860 than try_size. */
3861 unsigned HOST_WIDE_INT stmt_end
3862 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
3863 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3864 stmt_end - try_bitpos);
3865 if (info && info->bitpos >= try_bitpos)
3867 store_immediate_info *info2 = NULL;
3868 unsigned int first_copy = first;
3869 if (info->bitpos > try_bitpos
3870 && stmt_end - try_bitpos <= try_size)
3872 info2 = find_constituent_stores (group, NULL, &first_copy,
3873 try_bitpos,
3874 info->bitpos - try_bitpos);
3875 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3877 if (info2 == NULL && stmt_end - try_bitpos < try_size)
3879 info2 = find_constituent_stores (group, NULL, &first_copy,
3880 stmt_end,
3881 (try_bitpos + try_size)
3882 - stmt_end);
3883 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3885 if (info2 == NULL)
3887 try_size = stmt_end - try_bitpos;
3888 found_orig = true;
3889 goto found;
3894 /* Approximate store bitsize for the case when there are no padding
3895 bits. */
3896 while (try_size > size)
3897 try_size /= 2;
3898 /* Now look for whole padding bytes at the end of that bitsize. */
3899 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3900 if (group->mask[try_pos - bytepos + nonmasked - 1]
3901 != (unsigned char) ~0U
3902 && (!bzero_first
3903 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
3904 break;
3905 if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
3907 /* If entire try_size range is padding, skip it. */
3908 try_pos += try_size / BITS_PER_UNIT;
3909 size -= try_size;
3910 continue;
3912 /* Otherwise try to decrease try_size if the second half, last 3 quarters,
3913 etc. are padding. */
3914 nonmasked *= BITS_PER_UNIT;
3915 while (nonmasked <= try_size / 2)
3916 try_size /= 2;
3917 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
3919 /* Now look for whole padding bytes at the start of that bitsize. */
3920 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3921 for (masked = 0; masked < try_bytesize; ++masked)
3922 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3923 && (!bzero_first
3924 || group->val[try_pos - bytepos + masked] != 0))
3925 break;
3926 masked *= BITS_PER_UNIT;
3927 gcc_assert (masked < try_size);
3928 if (masked >= try_size / 2)
3930 while (masked >= try_size / 2)
3932 try_size /= 2;
3933 try_pos += try_size / BITS_PER_UNIT;
3934 size -= try_size;
3935 masked -= try_size;
3937 /* Need to recompute the alignment, so just retry at the new
3938 position. */
3939 continue;
3943 found:
3944 ++ret;
3946 if (split_stores)
3948 split_store *store
3949 = new split_store (try_pos, try_size, align);
3950 info = find_constituent_stores (group, &store->orig_stores,
3951 &first, try_bitpos, try_size);
3952 if (info
3953 && !gimple_clobber_p (info->stmt)
3954 && info->bitpos >= try_bitpos
3955 && info->bitpos + info->bitsize <= try_bitpos + try_size
3956 && (store->orig_stores.length () == 1
3957 || found_orig
3958 || (info->bitpos == try_bitpos
3959 && (info->bitpos + info->bitsize
3960 == try_bitpos + try_size))))
3962 store->orig = true;
3963 any_orig = true;
3965 split_stores->safe_push (store);
3968 try_pos += try_size / BITS_PER_UNIT;
3969 size -= try_size;
3972 if (total_orig)
3974 unsigned int i;
3975 split_store *store;
3976 /* If we are reusing some original stores and any of the
3977 original SSA_NAMEs had multiple uses, we need to subtract
3978 those now before we add the new ones. */
3979 if (total_new[0] && any_orig)
3981 FOR_EACH_VEC_ELT (*split_stores, i, store)
3982 if (store->orig)
3983 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3985 total_new[0] += ret; /* The new store. */
3986 store_immediate_info *info = group->stores[0];
3987 if (info->ops[0].base_addr)
3988 total_new[0] += ret;
3989 if (info->ops[1].base_addr)
3990 total_new[0] += ret;
3991 switch (info->rhs_code)
3993 case BIT_AND_EXPR:
3994 case BIT_IOR_EXPR:
3995 case BIT_XOR_EXPR:
3996 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3997 break;
3998 default:
3999 break;
4001 FOR_EACH_VEC_ELT (*split_stores, i, store)
4003 unsigned int j;
4004 bool bit_not_p[3] = { false, false, false };
4005 /* If all orig_stores have certain bit_not_p set, then
4006 we'd use a BIT_NOT_EXPR stmt and need to account for it.
4007 If some orig_stores have certain bit_not_p set, then
4008 we'd use a BIT_XOR_EXPR with a mask and need to account for
4009 it. */
4010 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
4012 if (info->ops[0].bit_not_p)
4013 bit_not_p[0] = true;
4014 if (info->ops[1].bit_not_p)
4015 bit_not_p[1] = true;
4016 if (info->bit_not_p)
4017 bit_not_p[2] = true;
4019 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
4024 return ret;
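/* Rough illustration only, not an exhaustive description of the heuristics:
   assuming MAX_STORE_BITSIZE == 64, allow_unaligned_store == false and a
   group that is 8-byte aligned, 8 bytes wide and has no padding bytes, the
   loop above would typically emit a single 8-byte split_store.  If instead
   the last 4 bytes of the group mask were all-ones padding, try_size would
   be halved and a single 4-byte split_store would cover the first half,
   with the trailing padding bytes skipped entirely.  */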
4027 /* Return the operation through which the operand IDX (if < 2) or
4028 result (IDX == 2) should be inverted. If NOP_EXPR, no inversion
4029 is done; if BIT_NOT_EXPR, all bits are inverted; if BIT_XOR_EXPR,
4030 the bits should be xored with MASK. */
4032 static enum tree_code
4033 invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
4035 unsigned int i;
4036 store_immediate_info *info;
4037 unsigned int cnt = 0;
4038 bool any_paddings = false;
4039 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
4041 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
4042 if (bit_not_p)
4044 ++cnt;
4045 tree lhs = gimple_assign_lhs (info->stmt);
4046 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4047 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
4048 any_paddings = true;
4051 mask = NULL_TREE;
4052 if (cnt == 0)
4053 return NOP_EXPR;
4054 if (cnt == split_store->orig_stores.length () && !any_paddings)
4055 return BIT_NOT_EXPR;
4057 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
4058 unsigned buf_size = split_store->size / BITS_PER_UNIT;
4059 unsigned char *buf
4060 = XALLOCAVEC (unsigned char, buf_size);
4061 memset (buf, ~0U, buf_size);
4062 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
4064 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
4065 if (!bit_not_p)
4066 continue;
4067 /* Clear regions with bit_not_p and invert afterwards, rather than
4068 clear regions with !bit_not_p, so that gaps in between stores aren't
4069 set in the mask. */
4070 unsigned HOST_WIDE_INT bitsize = info->bitsize;
4071 unsigned HOST_WIDE_INT prec = bitsize;
4072 unsigned int pos_in_buffer = 0;
4073 if (any_paddings)
4075 tree lhs = gimple_assign_lhs (info->stmt);
4076 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4077 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
4078 prec = TYPE_PRECISION (TREE_TYPE (lhs));
4080 if (info->bitpos < try_bitpos)
4082 gcc_assert (info->bitpos + bitsize > try_bitpos);
4083 if (!BYTES_BIG_ENDIAN)
4085 if (prec <= try_bitpos - info->bitpos)
4086 continue;
4087 prec -= try_bitpos - info->bitpos;
4089 bitsize -= try_bitpos - info->bitpos;
4090 if (BYTES_BIG_ENDIAN && prec > bitsize)
4091 prec = bitsize;
4093 else
4094 pos_in_buffer = info->bitpos - try_bitpos;
4095 if (prec < bitsize)
4097 /* If this is a bool inversion, invert just the least significant
4098 prec bits rather than all bits of it. */
4099 if (BYTES_BIG_ENDIAN)
4101 pos_in_buffer += bitsize - prec;
4102 if (pos_in_buffer >= split_store->size)
4103 continue;
4105 bitsize = prec;
4107 if (pos_in_buffer + bitsize > split_store->size)
4108 bitsize = split_store->size - pos_in_buffer;
4109 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
4110 if (BYTES_BIG_ENDIAN)
4111 clear_bit_region_be (p, (BITS_PER_UNIT - 1
4112 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
4113 else
4114 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
4116 for (unsigned int i = 0; i < buf_size; ++i)
4117 buf[i] = ~buf[i];
4118 mask = native_interpret_expr (int_type, buf, buf_size);
4119 return BIT_XOR_EXPR;
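/* For example, in a hypothetical two-byte split store: if every
   constituent store has the relevant bit_not_p set and none of them
   stores an integral value narrower than its bitsize, BIT_NOT_EXPR is
   returned and the whole value is inverted.  If only the store covering
   the first byte has bit_not_p set, BIT_XOR_EXPR is returned with MASK
   having all bits of that first byte set and the other bits clear.  */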
4122 /* Given a merged store group GROUP output the widened version of it.
4123 The store chain is against the base object BASE.
4124 Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
4125 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
4126 Make sure that the number of statements output is less than the number of
4127 original statements. If a better sequence is possible emit it and
4128 return true. */
4130 bool
4131 imm_store_chain_info::output_merged_store (merged_store_group *group)
4133 const unsigned HOST_WIDE_INT start_byte_pos
4134 = group->bitregion_start / BITS_PER_UNIT;
4135 unsigned int orig_num_stmts = group->stores.length ();
4136 if (orig_num_stmts < 2)
4137 return false;
4139 bool allow_unaligned_store
4140 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
4141 bool allow_unaligned_load = allow_unaligned_store;
4142 bool bzero_first = false;
4143 store_immediate_info *store;
4144 unsigned int num_clobber_stmts = 0;
4145 if (group->stores[0]->rhs_code == INTEGER_CST)
4147 unsigned int i;
4148 FOR_EACH_VEC_ELT (group->stores, i, store)
4149 if (gimple_clobber_p (store->stmt))
4150 num_clobber_stmts++;
4151 else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
4152 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
4153 && group->start == store->bitpos
4154 && group->width == store->bitsize
4155 && (group->start % BITS_PER_UNIT) == 0
4156 && (group->width % BITS_PER_UNIT) == 0)
4158 bzero_first = true;
4159 break;
4161 else
4162 break;
4163 FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
4164 if (gimple_clobber_p (store->stmt))
4165 num_clobber_stmts++;
4166 if (num_clobber_stmts == orig_num_stmts)
4167 return false;
4168 orig_num_stmts -= num_clobber_stmts;
4170 if (allow_unaligned_store || bzero_first)
4172 /* If unaligned stores are allowed, see how many stores we'd emit
4173 for unaligned and how many stores we'd emit for aligned stores.
4174 Only use unaligned stores if that results in fewer stores than aligned.
4175 Similarly, if the whole region is cleared first, prefer expanding
4176 it all together rather than emitting the clear first followed by
4177 merged stores for the overrides. */
4178 unsigned cnt[4] = { ~0U, ~0U, ~0U, ~0U };
4179 int pass_min = 0;
4180 for (int pass = 0; pass < 4; ++pass)
4182 if (!allow_unaligned_store && (pass & 1) != 0)
4183 continue;
4184 if (!bzero_first && (pass & 2) != 0)
4185 continue;
4186 cnt[pass] = split_group (group, (pass & 1) != 0,
4187 allow_unaligned_load, (pass & 2) != 0,
4188 NULL, NULL, NULL);
4189 if (cnt[pass] < cnt[pass_min])
4190 pass_min = pass;
4192 if ((pass_min & 1) == 0)
4193 allow_unaligned_store = false;
4194 if ((pass_min & 2) == 0)
4195 bzero_first = false;
4198 auto_vec<class split_store *, 32> split_stores;
4199 split_store *split_store;
4200 unsigned total_orig, total_new, i;
4201 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
4202 &split_stores, &total_orig, &total_new);
4204 /* Determine if there is a clobber covering the whole group at the start,
4205 followed by proposed split stores that cover the whole group. In that
4206 case, prefer the transformation even if
4207 split_stores.length () == orig_num_stmts. */
4208 bool clobber_first = false;
4209 if (num_clobber_stmts
4210 && gimple_clobber_p (group->stores[0]->stmt)
4211 && group->start == group->stores[0]->bitpos
4212 && group->width == group->stores[0]->bitsize
4213 && (group->start % BITS_PER_UNIT) == 0
4214 && (group->width % BITS_PER_UNIT) == 0)
4216 clobber_first = true;
4217 unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
4218 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4219 if (split_store->bytepos != pos)
4221 clobber_first = false;
4222 break;
4224 else
4225 pos += split_store->size / BITS_PER_UNIT;
4226 if (pos != (group->start + group->width) / BITS_PER_UNIT)
4227 clobber_first = false;
4230 if (split_stores.length () >= orig_num_stmts + clobber_first)
4233 /* We didn't manage to reduce the number of statements. Bail out. */
4234 if (dump_file && (dump_flags & TDF_DETAILS))
4235 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4236 " Not profitable to emit new sequence.\n",
4237 orig_num_stmts);
4238 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4239 delete split_store;
4240 return false;
4242 if (total_orig <= total_new)
4244 /* If the estimated number of new statements is not smaller than the
4245 estimated number of original statements, bail out too. */
4246 if (dump_file && (dump_flags & TDF_DETAILS))
4247 fprintf (dump_file, "Estimated number of original stmts (%u)"
4248 " not larger than estimated number of new"
4249 " stmts (%u).\n",
4250 total_orig, total_new);
4251 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4252 delete split_store;
4253 return false;
4255 if (group->stores[0]->rhs_code == INTEGER_CST)
4257 bool all_orig = true;
4258 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4259 if (!split_store->orig)
4261 all_orig = false;
4262 break;
4264 if (all_orig)
4266 unsigned int cnt = split_stores.length ();
4267 store_immediate_info *store;
4268 FOR_EACH_VEC_ELT (group->stores, i, store)
4269 if (gimple_clobber_p (store->stmt))
4270 ++cnt;
4271 /* Punt if we wouldn't make any real changes, i.e. keep all
4272 orig stmts + all clobbers. */
4273 if (cnt == group->stores.length ())
4275 if (dump_file && (dump_flags & TDF_DETAILS))
4276 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4277 " Not profitable to emit new sequence.\n",
4278 orig_num_stmts);
4279 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4280 delete split_store;
4281 return false;
4286 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
4287 gimple_seq seq = NULL;
4288 tree last_vdef, new_vuse;
4289 last_vdef = gimple_vdef (group->last_stmt);
4290 new_vuse = gimple_vuse (group->last_stmt);
4291 tree bswap_res = NULL_TREE;
4293 /* Clobbers are not removed. */
4294 if (gimple_clobber_p (group->last_stmt))
4296 new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
4297 gimple_set_vdef (group->last_stmt, new_vuse);
4300 if (group->stores[0]->rhs_code == LROTATE_EXPR
4301 || group->stores[0]->rhs_code == NOP_EXPR)
4303 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
4304 gimple *ins_stmt = group->stores[0]->ins_stmt;
4305 struct symbolic_number *n = &group->stores[0]->n;
4306 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
4308 switch (n->range)
4310 case 16:
4311 load_type = bswap_type = uint16_type_node;
4312 break;
4313 case 32:
4314 load_type = uint32_type_node;
4315 if (bswap)
4317 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
4318 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4320 break;
4321 case 64:
4322 load_type = uint64_type_node;
4323 if (bswap)
4325 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
4326 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4328 break;
4329 default:
4330 gcc_unreachable ();
4333 /* If the loads each have the vuse of the corresponding store,
4334 we've checked the aliasing already in try_coalesce_bswap and
4335 we want to sink the needed load into seq, so we need to use new_vuse
4336 on the load. */
4337 if (n->base_addr)
4339 if (n->vuse == NULL)
4341 n->vuse = new_vuse;
4342 ins_stmt = NULL;
4344 else
4345 /* Update vuse in case it has been changed by output_merged_stores. */
4346 n->vuse = gimple_vuse (ins_stmt);
4348 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
4349 bswap_type, load_type, n, bswap,
4350 ~(uint64_t) 0, 0);
4351 gcc_assert (bswap_res);
4354 gimple *stmt = NULL;
4355 auto_vec<gimple *, 32> orig_stmts;
4356 gimple_seq this_seq;
4357 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
4358 is_gimple_mem_ref_addr, NULL_TREE);
4359 gimple_seq_add_seq_without_update (&seq, this_seq);
4361 tree load_addr[2] = { NULL_TREE, NULL_TREE };
4362 gimple_seq load_seq[2] = { NULL, NULL };
4363 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
4364 for (int j = 0; j < 2; ++j)
4366 store_operand_info &op = group->stores[0]->ops[j];
4367 if (op.base_addr == NULL_TREE)
4368 continue;
4370 store_immediate_info *infol = group->stores.last ();
4371 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
4373 /* We can't pick the location randomly; while we've verified
4374 all the loads have the same vuse, they can still be in different
4375 basic blocks and we need to pick the one from the last bb:
4376 int x = q[0];
4377 if (x == N) return;
4378 int y = q[1];
4379 p[0] = x;
4380 p[1] = y;
4381 otherwise if we put the wider load at the q[0] load, we might
4382 segfault if q[1] is not mapped. */
4383 basic_block bb = gimple_bb (op.stmt);
4384 gimple *ostmt = op.stmt;
4385 store_immediate_info *info;
4386 FOR_EACH_VEC_ELT (group->stores, i, info)
4388 gimple *tstmt = info->ops[j].stmt;
4389 basic_block tbb = gimple_bb (tstmt);
4390 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
4392 ostmt = tstmt;
4393 bb = tbb;
4396 load_gsi[j] = gsi_for_stmt (ostmt);
4397 load_addr[j]
4398 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4399 &load_seq[j], is_gimple_mem_ref_addr,
4400 NULL_TREE);
4402 else if (operand_equal_p (base_addr, op.base_addr, 0))
4403 load_addr[j] = addr;
4404 else
4406 load_addr[j]
4407 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4408 &this_seq, is_gimple_mem_ref_addr,
4409 NULL_TREE);
4410 gimple_seq_add_seq_without_update (&seq, this_seq);
4414 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4416 const unsigned HOST_WIDE_INT try_size = split_store->size;
4417 const unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
4418 const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
4419 const unsigned HOST_WIDE_INT try_align = split_store->align;
4420 const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos;
4421 tree dest, src;
4422 location_t loc;
4424 if (split_store->orig)
4426 /* If there is just a single non-clobber constituent store
4427 which covers the whole area, just reuse the lhs and rhs. */
4428 gimple *orig_stmt = NULL;
4429 store_immediate_info *store;
4430 unsigned int j;
4431 FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
4432 if (!gimple_clobber_p (store->stmt))
4434 orig_stmt = store->stmt;
4435 break;
4437 dest = gimple_assign_lhs (orig_stmt);
4438 src = gimple_assign_rhs1 (orig_stmt);
4439 loc = gimple_location (orig_stmt);
4441 else
4443 store_immediate_info *info;
4444 unsigned short clique, base;
4445 unsigned int k;
4446 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4447 orig_stmts.safe_push (info->stmt);
4448 tree offset_type
4449 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
4450 tree dest_type;
4451 loc = get_location_for_stmts (orig_stmts);
4452 orig_stmts.truncate (0);
4454 if (group->string_concatenation)
4455 dest_type
4456 = build_array_type_nelts (char_type_node,
4457 try_size / BITS_PER_UNIT);
4458 else
4460 dest_type = build_nonstandard_integer_type (try_size, UNSIGNED);
4461 dest_type = build_aligned_type (dest_type, try_align);
4463 dest = fold_build2 (MEM_REF, dest_type, addr,
4464 build_int_cst (offset_type, try_pos));
4465 if (TREE_CODE (dest) == MEM_REF)
4467 MR_DEPENDENCE_CLIQUE (dest) = clique;
4468 MR_DEPENDENCE_BASE (dest) = base;
4471 tree mask;
4472 if (bswap_res || group->string_concatenation)
4473 mask = integer_zero_node;
4474 else
4475 mask = native_interpret_expr (dest_type,
4476 group->mask + try_offset,
4477 group->buf_size);
4479 tree ops[2];
4480 for (int j = 0;
4481 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
4482 ++j)
4484 store_operand_info &op = split_store->orig_stores[0]->ops[j];
4485 if (bswap_res)
4486 ops[j] = bswap_res;
4487 else if (group->string_concatenation)
4489 ops[j] = build_string (try_size / BITS_PER_UNIT,
4490 (const char *) group->val + try_offset);
4491 TREE_TYPE (ops[j]) = dest_type;
4493 else if (op.base_addr)
4495 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4496 orig_stmts.safe_push (info->ops[j].stmt);
4498 offset_type = get_alias_type_for_stmts (orig_stmts, true,
4499 &clique, &base);
4500 location_t load_loc = get_location_for_stmts (orig_stmts);
4501 orig_stmts.truncate (0);
4503 unsigned HOST_WIDE_INT load_align = group->load_align[j];
4504 unsigned HOST_WIDE_INT align_bitpos
4505 = known_alignment (try_bitpos
4506 - split_store->orig_stores[0]->bitpos
4507 + op.bitpos);
4508 if (align_bitpos & (load_align - 1))
4509 load_align = least_bit_hwi (align_bitpos);
4511 tree load_int_type
4512 = build_nonstandard_integer_type (try_size, UNSIGNED);
4513 load_int_type
4514 = build_aligned_type (load_int_type, load_align);
4516 poly_uint64 load_pos
4517 = exact_div (try_bitpos
4518 - split_store->orig_stores[0]->bitpos
4519 + op.bitpos,
4520 BITS_PER_UNIT);
4521 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
4522 build_int_cst (offset_type, load_pos));
4523 if (TREE_CODE (ops[j]) == MEM_REF)
4525 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
4526 MR_DEPENDENCE_BASE (ops[j]) = base;
4528 if (!integer_zerop (mask))
4530 /* The load might load some bits (that will be masked
4531 off later on) uninitialized; avoid -W*uninitialized
4532 warnings in that case. */
4533 suppress_warning (ops[j], OPT_Wuninitialized);
4536 stmt = gimple_build_assign (make_ssa_name (dest_type), ops[j]);
4537 gimple_set_location (stmt, load_loc);
4538 if (gsi_bb (load_gsi[j]))
4540 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4541 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4543 else
4545 gimple_set_vuse (stmt, new_vuse);
4546 gimple_seq_add_stmt_without_update (&seq, stmt);
4548 ops[j] = gimple_assign_lhs (stmt);
4549 tree xor_mask;
4550 enum tree_code inv_op
4551 = invert_op (split_store, j, dest_type, xor_mask);
4552 if (inv_op != NOP_EXPR)
4554 stmt = gimple_build_assign (make_ssa_name (dest_type),
4555 inv_op, ops[j], xor_mask);
4556 gimple_set_location (stmt, load_loc);
4557 ops[j] = gimple_assign_lhs (stmt);
4559 if (gsi_bb (load_gsi[j]))
4560 gimple_seq_add_stmt_without_update (&load_seq[j],
4561 stmt);
4562 else
4563 gimple_seq_add_stmt_without_update (&seq, stmt);
4566 else
4567 ops[j] = native_interpret_expr (dest_type,
4568 group->val + try_offset,
4569 group->buf_size);
4572 switch (split_store->orig_stores[0]->rhs_code)
4574 case BIT_AND_EXPR:
4575 case BIT_IOR_EXPR:
4576 case BIT_XOR_EXPR:
4577 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4579 tree rhs1 = gimple_assign_rhs1 (info->stmt);
4580 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
4582 location_t bit_loc;
4583 bit_loc = get_location_for_stmts (orig_stmts);
4584 orig_stmts.truncate (0);
4586 stmt
4587 = gimple_build_assign (make_ssa_name (dest_type),
4588 split_store->orig_stores[0]->rhs_code,
4589 ops[0], ops[1]);
4590 gimple_set_location (stmt, bit_loc);
4591 /* If there is just one load and there is a separate
4592 load_seq[0], emit the bitwise op right after it. */
4593 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4594 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4595 /* Otherwise, if at least one load is in seq, we need to
4596 emit the bitwise op right before the store. If there
4597 are two loads and are emitted somewhere else, it would
4598 be better to emit the bitwise op as early as possible;
4599 we don't track where that would be possible right now
4600 though. */
4601 else
4602 gimple_seq_add_stmt_without_update (&seq, stmt);
4603 src = gimple_assign_lhs (stmt);
4604 tree xor_mask;
4605 enum tree_code inv_op;
4606 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
4607 if (inv_op != NOP_EXPR)
4609 stmt = gimple_build_assign (make_ssa_name (dest_type),
4610 inv_op, src, xor_mask);
4611 gimple_set_location (stmt, bit_loc);
4612 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4613 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4614 else
4615 gimple_seq_add_stmt_without_update (&seq, stmt);
4616 src = gimple_assign_lhs (stmt);
4618 break;
4619 case LROTATE_EXPR:
4620 case NOP_EXPR:
4621 src = ops[0];
4622 if (!is_gimple_val (src))
4624 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4625 src);
4626 gimple_seq_add_stmt_without_update (&seq, stmt);
4627 src = gimple_assign_lhs (stmt);
4629 if (!useless_type_conversion_p (dest_type, TREE_TYPE (src)))
4631 stmt = gimple_build_assign (make_ssa_name (dest_type),
4632 NOP_EXPR, src);
4633 gimple_seq_add_stmt_without_update (&seq, stmt);
4634 src = gimple_assign_lhs (stmt);
4636 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
4637 if (inv_op != NOP_EXPR)
4639 stmt = gimple_build_assign (make_ssa_name (dest_type),
4640 inv_op, src, xor_mask);
4641 gimple_set_location (stmt, loc);
4642 gimple_seq_add_stmt_without_update (&seq, stmt);
4643 src = gimple_assign_lhs (stmt);
4645 break;
4646 default:
4647 src = ops[0];
4648 break;
4651 /* If bit insertion is required, we use the source as an accumulator
4652 into which the successive bit-field values are manually inserted.
4653 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
4654 if (group->bit_insertion)
4655 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4656 if (info->rhs_code == BIT_INSERT_EXPR
4657 && info->bitpos < try_bitpos + try_size
4658 && info->bitpos + info->bitsize > try_bitpos)
4660 /* Mask, truncate, convert to final type, shift and ior into
4661 the accumulator. Note that every step can be a no-op. */
4662 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4663 const HOST_WIDE_INT end_gap
4664 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4665 tree tem = info->ops[0].val;
4666 if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
4668 const unsigned HOST_WIDE_INT size
4669 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
4670 tree integer_type
4671 = build_nonstandard_integer_type (size, UNSIGNED);
4672 tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
4673 integer_type, tem);
4675 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4677 tree bitfield_type
4678 = build_nonstandard_integer_type (info->bitsize,
4679 UNSIGNED);
4680 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4682 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
4684 const unsigned HOST_WIDE_INT imask
4685 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4686 tem = gimple_build (&seq, loc,
4687 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4688 build_int_cst (TREE_TYPE (tem),
4689 imask));
4691 const HOST_WIDE_INT shift
4692 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4693 if (shift < 0)
4694 tem = gimple_build (&seq, loc,
4695 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4696 build_int_cst (NULL_TREE, -shift));
4697 tem = gimple_convert (&seq, loc, dest_type, tem);
4698 if (shift > 0)
4699 tem = gimple_build (&seq, loc,
4700 LSHIFT_EXPR, dest_type, tem,
4701 build_int_cst (NULL_TREE, shift));
4702 src = gimple_build (&seq, loc,
4703 BIT_IOR_EXPR, dest_type, tem, src);
4706 if (!integer_zerop (mask))
4708 tree tem = make_ssa_name (dest_type);
4709 tree load_src = unshare_expr (dest);
4710 /* The load might load some or all bits uninitialized;
4711 avoid -W*uninitialized warnings in that case.
4712 As an optimization, if all the bits were provably
4713 uninitialized (no stores at all yet, or the previous
4714 store was a CLOBBER), we could optimize away the load and
4715 replace it e.g. with 0. */
4716 suppress_warning (load_src, OPT_Wuninitialized);
4717 stmt = gimple_build_assign (tem, load_src);
4718 gimple_set_location (stmt, loc);
4719 gimple_set_vuse (stmt, new_vuse);
4720 gimple_seq_add_stmt_without_update (&seq, stmt);
4722 /* FIXME: If there is a single chunk of zero bits in mask,
4723 perhaps use BIT_INSERT_EXPR instead? */
4724 stmt = gimple_build_assign (make_ssa_name (dest_type),
4725 BIT_AND_EXPR, tem, mask);
4726 gimple_set_location (stmt, loc);
4727 gimple_seq_add_stmt_without_update (&seq, stmt);
4728 tem = gimple_assign_lhs (stmt);
4730 if (TREE_CODE (src) == INTEGER_CST)
4731 src = wide_int_to_tree (dest_type,
4732 wi::bit_and_not (wi::to_wide (src),
4733 wi::to_wide (mask)));
4734 else
4736 tree nmask
4737 = wide_int_to_tree (dest_type,
4738 wi::bit_not (wi::to_wide (mask)));
4739 stmt = gimple_build_assign (make_ssa_name (dest_type),
4740 BIT_AND_EXPR, src, nmask);
4741 gimple_set_location (stmt, loc);
4742 gimple_seq_add_stmt_without_update (&seq, stmt);
4743 src = gimple_assign_lhs (stmt);
4745 stmt = gimple_build_assign (make_ssa_name (dest_type),
4746 BIT_IOR_EXPR, tem, src);
4747 gimple_set_location (stmt, loc);
4748 gimple_seq_add_stmt_without_update (&seq, stmt);
4749 src = gimple_assign_lhs (stmt);
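/* At this point, for a nonzero MASK, the statements built above amount to
   a read-modify-write of roughly this shape (names illustrative only):
     tem = MEM <dest_type> [addr] & mask;   // preserve the untouched bits
     src = src & ~mask;                     // keep only the stored bits
     src = tem | src;
   so bits outside the constituent stores retain their previous value.  */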
4753 stmt = gimple_build_assign (dest, src);
4754 gimple_set_location (stmt, loc);
4755 gimple_set_vuse (stmt, new_vuse);
4756 gimple_seq_add_stmt_without_update (&seq, stmt);
4758 if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4759 add_stmt_to_eh_lp (stmt, group->lp_nr);
4761 tree new_vdef;
4762 if (i < split_stores.length () - 1)
4763 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
4764 else
4765 new_vdef = last_vdef;
4767 gimple_set_vdef (stmt, new_vdef);
4768 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4769 new_vuse = new_vdef;
4772 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4773 delete split_store;
4775 gcc_assert (seq);
4776 if (dump_file)
4778 fprintf (dump_file,
4779 "New sequence of %u stores to replace old one of %u stores\n",
4780 split_stores.length (), orig_num_stmts);
4781 if (dump_flags & TDF_DETAILS)
4782 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4785 if (gimple_clobber_p (group->last_stmt))
4786 update_stmt (group->last_stmt);
4788 if (group->lp_nr > 0)
4790 /* We're going to insert a sequence of (potentially) throwing stores
4791 into an active EH region. This means that we're going to create
4792 new basic blocks with EH edges pointing to the post landing pad
4793 and will therefore have to update its PHI nodes, if any. For the
4794 virtual PHI node, we're going to use the VDEFs created above, but
4795 for the other nodes, we need to record the original reaching defs. */
4796 eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4797 basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4798 basic_block last_bb = gimple_bb (group->last_stmt);
4799 edge last_edge = find_edge (last_bb, lp_bb);
4800 auto_vec<tree, 16> last_defs;
4801 gphi_iterator gpi;
4802 for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4804 gphi *phi = gpi.phi ();
4805 tree last_def;
4806 if (virtual_operand_p (gimple_phi_result (phi)))
4807 last_def = NULL_TREE;
4808 else
4809 last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4810 last_defs.safe_push (last_def);
4813 /* Do the insertion. Then, if new basic blocks have been created in the
4814 process, rewind the chain of VDEFs created above to walk the new basic
4815 blocks and update the corresponding arguments of the PHI nodes. */
4816 update_modified_stmts (seq);
4817 if (gimple_find_sub_bbs (seq, &last_gsi))
4818 while (last_vdef != gimple_vuse (group->last_stmt))
4820 gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4821 if (stmt_could_throw_p (cfun, stmt))
4823 edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4824 unsigned int i;
4825 for (gpi = gsi_start_phis (lp_bb), i = 0;
4826 !gsi_end_p (gpi);
4827 gsi_next (&gpi), i++)
4829 gphi *phi = gpi.phi ();
4830 tree new_def;
4831 if (virtual_operand_p (gimple_phi_result (phi)))
4832 new_def = last_vdef;
4833 else
4834 new_def = last_defs[i];
4835 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4838 last_vdef = gimple_vuse (stmt);
4841 else
4842 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4844 for (int j = 0; j < 2; ++j)
4845 if (load_seq[j])
4846 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
4848 return true;
4851 /* Process the merged_store_group objects created in the coalescing phase.
4852 The stores are all against the base object BASE.
4853 Try to output the widened stores and delete the original statements if
4854 successful. Return true iff any changes were made. */
4856 bool
4857 imm_store_chain_info::output_merged_stores ()
4859 unsigned int i;
4860 merged_store_group *merged_store;
4861 bool ret = false;
4862 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4864 if (dbg_cnt (store_merging)
4865 && output_merged_store (merged_store))
4867 unsigned int j;
4868 store_immediate_info *store;
4869 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4871 gimple *stmt = store->stmt;
4872 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4873 /* Don't remove clobbers; they are still useful even if
4874 everything is overwritten afterwards. */
4875 if (gimple_clobber_p (stmt))
4876 continue;
4877 gsi_remove (&gsi, true);
4878 if (store->lp_nr)
4879 remove_stmt_from_eh_lp (stmt);
4880 if (stmt != merged_store->last_stmt)
4882 unlink_stmt_vdef (stmt);
4883 release_defs (stmt);
4886 ret = true;
4889 if (ret && dump_file)
4890 fprintf (dump_file, "Merging successful!\n");
4892 return ret;
4895 /* Coalesce the store_immediate_info objects recorded against the base object
4896 BASE in the first phase and output them.
4897 Delete the allocated structures.
4898 Return true if any changes were made. */
4900 bool
4901 imm_store_chain_info::terminate_and_process_chain ()
4903 if (dump_file && (dump_flags & TDF_DETAILS))
4904 fprintf (dump_file, "Terminating chain with %u stores\n",
4905 m_store_info.length ());
4906 /* Process store chain. */
4907 bool ret = false;
4908 if (m_store_info.length () > 1)
4910 ret = coalesce_immediate_stores ();
4911 if (ret)
4912 ret = output_merged_stores ();
4915 /* Delete all the entries we allocated ourselves. */
4916 store_immediate_info *info;
4917 unsigned int i;
4918 FOR_EACH_VEC_ELT (m_store_info, i, info)
4919 delete info;
4921 merged_store_group *merged_info;
4922 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4923 delete merged_info;
4925 return ret;
4928 /* Return true iff LHS is a destination potentially interesting for
4929 store merging. In practice these are the codes that get_inner_reference
4930 can process. */
4932 static bool
4933 lhs_valid_for_store_merging_p (tree lhs)
4935 if (DECL_P (lhs))
4936 return true;
4938 switch (TREE_CODE (lhs))
4940 case ARRAY_REF:
4941 case ARRAY_RANGE_REF:
4942 case BIT_FIELD_REF:
4943 case COMPONENT_REF:
4944 case MEM_REF:
4945 case VIEW_CONVERT_EXPR:
4946 return true;
4947 default:
4948 return false;
4952 /* Return true if the tree RHS is a constant we want to consider
4953 during store merging. In practice accept all codes that
4954 native_encode_expr accepts. */
4956 static bool
4957 rhs_valid_for_store_merging_p (tree rhs)
4959 unsigned HOST_WIDE_INT size;
4960 if (TREE_CODE (rhs) == CONSTRUCTOR
4961 && CONSTRUCTOR_NELTS (rhs) == 0
4962 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4963 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4964 return true;
4965 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4966 && native_encode_expr (rhs, NULL, size) != 0);
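/* For instance, an INTEGER_CST such as 0x1234 (or any constant that
   native_encode_expr can encode and whose mode size is a compile-time
   constant) is accepted, and an empty CONSTRUCTOR like char buf[16] = {}
   is accepted as a bzero-like store; everything else is rejected.  */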
4969 /* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
4970 and return true on success or false on failure. */
4972 static bool
4973 adjust_bit_pos (poly_offset_int byte_off,
4974 poly_int64 *pbitpos,
4975 poly_uint64 *pbitregion_start,
4976 poly_uint64 *pbitregion_end)
4978 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4979 bit_off += *pbitpos;
4981 if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4983 if (maybe_ne (*pbitregion_end, 0U))
4985 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4986 bit_off += *pbitregion_start;
4987 if (bit_off.to_uhwi (pbitregion_start))
4989 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4990 bit_off += *pbitregion_end;
4991 if (!bit_off.to_uhwi (pbitregion_end))
4992 *pbitregion_end = 0;
4994 else
4995 *pbitregion_end = 0;
4997 return true;
4999 else
5000 return false;
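/* Worked example with made-up numbers: for BYTE_OFF == 2, *PBITPOS == 5,
   *PBITREGION_START == 0 and *PBITREGION_END == 32, the byte offset is
   converted to 16 bits and added to each value, yielding *PBITPOS == 21,
   *PBITREGION_START == 16 and *PBITREGION_END == 48.  If the adjusted bit
   position were negative or did not fit, false would be returned.  */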
5003 /* If MEM is a memory reference usable for store merging (either as
5004 store destination or for loads), return the non-NULL base_addr
5005 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
5006 Otherwise return NULL; *PBITSIZE should still be valid even in that
5007 case. */
5009 static tree
5010 mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
5011 poly_uint64 *pbitpos,
5012 poly_uint64 *pbitregion_start,
5013 poly_uint64 *pbitregion_end)
5015 poly_int64 bitsize, bitpos;
5016 poly_uint64 bitregion_start = 0, bitregion_end = 0;
5017 machine_mode mode;
5018 int unsignedp = 0, reversep = 0, volatilep = 0;
5019 tree offset;
5020 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
5021 &unsignedp, &reversep, &volatilep);
5022 *pbitsize = bitsize;
5023 if (known_le (bitsize, 0))
5024 return NULL_TREE;
5026 if (TREE_CODE (mem) == COMPONENT_REF
5027 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
5029 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
5030 if (maybe_ne (bitregion_end, 0U))
5031 bitregion_end += 1;
5034 if (reversep)
5035 return NULL_TREE;
5037 /* We do not want to rewrite TARGET_MEM_REFs. */
5038 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
5039 return NULL_TREE;
5040 /* In some cases get_inner_reference may return a
5041 MEM_REF [ptr + byteoffset]. For the purposes of this pass
5042 canonicalize the base_addr to MEM_REF [ptr] and take
5043 byteoffset into account in the bitpos. This occurs in
5044 PR 23684 and this way we can catch more chains. */
5045 else if (TREE_CODE (base_addr) == MEM_REF)
5047 if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
5048 &bitregion_start, &bitregion_end))
5049 return NULL_TREE;
5050 base_addr = TREE_OPERAND (base_addr, 0);
5052 /* get_inner_reference returns the base object, get at its
5053 address now. */
5054 else
5056 if (maybe_lt (bitpos, 0))
5057 return NULL_TREE;
5058 base_addr = build_fold_addr_expr (base_addr);
5061 if (offset)
5063 /* If the access has a variable offset then a base decl has to be
5064 address-taken to be able to emit pointer-based stores to it.
5065 ??? We might be able to get away with re-using the original
5066 base up to the first variable part and then wrapping that inside
5067 a BIT_FIELD_REF. */
5068 tree base = get_base_address (base_addr);
5069 if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
5070 return NULL_TREE;
5072 /* As we did for the base above, remove the constant part from the offset. */
5073 if (TREE_CODE (offset) == PLUS_EXPR
5074 && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
5075 && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
5076 &bitpos, &bitregion_start, &bitregion_end))
5077 offset = TREE_OPERAND (offset, 0);
5079 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
5080 base_addr, offset);
5083 if (known_eq (bitregion_end, 0U))
5085 bitregion_start = round_down_to_byte_boundary (bitpos);
5086 bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
5089 *pbitsize = bitsize;
5090 *pbitpos = bitpos;
5091 *pbitregion_start = bitregion_start;
5092 *pbitregion_end = bitregion_end;
5093 return base_addr;
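/* For example, for an access of the form MEM[(int *)p_1 + 4B] the constant
   byte offset is stripped, p_1 is returned as the base and the 4 bytes are
   folded into *PBITPOS (32 bits with 8-bit bytes), so stores through
   differently-offset MEM_REFs off the same pointer can land in one chain.  */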
5096 /* Return true if STMT is a load that can be used for store merging.
5097 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
5098 BITREGION_END are properties of the corresponding store. */
5100 static bool
5101 handled_load (gimple *stmt, store_operand_info *op,
5102 poly_uint64 bitsize, poly_uint64 bitpos,
5103 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
5105 if (!is_gimple_assign (stmt))
5106 return false;
5107 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
5109 tree rhs1 = gimple_assign_rhs1 (stmt);
5110 if (TREE_CODE (rhs1) == SSA_NAME
5111 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
5112 bitregion_start, bitregion_end))
5114 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
5115 been optimized earlier, but if allowed here, would confuse the
5116 multiple uses counting. */
5117 if (op->bit_not_p)
5118 return false;
5119 op->bit_not_p = !op->bit_not_p;
5120 return true;
5122 return false;
5124 if (gimple_vuse (stmt)
5125 && gimple_assign_load_p (stmt)
5126 && !stmt_can_throw_internal (cfun, stmt)
5127 && !gimple_has_volatile_ops (stmt))
5129 tree mem = gimple_assign_rhs1 (stmt);
5130 op->base_addr
5131 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
5132 &op->bitregion_start,
5133 &op->bitregion_end);
5134 if (op->base_addr != NULL_TREE
5135 && known_eq (op->bitsize, bitsize)
5136 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
5137 && known_ge (op->bitpos - op->bitregion_start,
5138 bitpos - bitregion_start)
5139 && known_ge (op->bitregion_end - op->bitpos,
5140 bitregion_end - bitpos))
5142 op->stmt = stmt;
5143 op->val = mem;
5144 op->bit_not_p = false;
5145 return true;
5148 return false;
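/* E.g. a defining statement _1 = MEM[(char *)q_2 + 3B]; is a handled load
   provided its size equals BITSIZE, its bit position differs from the
   store's by a whole number of bytes and its bit region covers at least as
   much around the access as the store's does; a BIT_NOT_EXPR wrapped
   around such a load is also accepted, recorded via op->bit_not_p.  */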
5151 /* Return the index number of the landing pad for STMT, if any. */
5153 static int
5154 lp_nr_for_store (gimple *stmt)
5156 if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
5157 return 0;
5159 if (!stmt_could_throw_p (cfun, stmt))
5160 return 0;
5162 return lookup_stmt_eh_lp (stmt);
5165 /* Record the store STMT for store merging optimization if it can be
5166 optimized. Return true if any changes were made. */
5168 bool
5169 pass_store_merging::process_store (gimple *stmt)
5171 tree lhs = gimple_assign_lhs (stmt);
5172 tree rhs = gimple_assign_rhs1 (stmt);
5173 poly_uint64 bitsize, bitpos = 0;
5174 poly_uint64 bitregion_start = 0, bitregion_end = 0;
5175 tree base_addr
5176 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
5177 &bitregion_start, &bitregion_end);
5178 if (known_eq (bitsize, 0U))
5179 return false;
5181 bool invalid = (base_addr == NULL_TREE
5182 || (maybe_gt (bitsize,
5183 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
5184 && TREE_CODE (rhs) != INTEGER_CST
5185 && (TREE_CODE (rhs) != CONSTRUCTOR
5186 || CONSTRUCTOR_NELTS (rhs) != 0)));
5187 enum tree_code rhs_code = ERROR_MARK;
5188 bool bit_not_p = false;
5189 struct symbolic_number n;
5190 gimple *ins_stmt = NULL;
5191 store_operand_info ops[2];
5192 if (invalid)
5194 else if (TREE_CODE (rhs) == STRING_CST)
5196 rhs_code = STRING_CST;
5197 ops[0].val = rhs;
5199 else if (rhs_valid_for_store_merging_p (rhs))
5201 rhs_code = INTEGER_CST;
5202 ops[0].val = rhs;
5204 else if (TREE_CODE (rhs) == SSA_NAME)
5206 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
5207 if (!is_gimple_assign (def_stmt))
5208 invalid = true;
5209 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
5210 bitregion_start, bitregion_end))
5211 rhs_code = MEM_REF;
5212 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
5214 tree rhs1 = gimple_assign_rhs1 (def_stmt);
5215 if (TREE_CODE (rhs1) == SSA_NAME
5216 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
5218 bit_not_p = true;
5219 def_stmt = SSA_NAME_DEF_STMT (rhs1);
5223 if (rhs_code == ERROR_MARK && !invalid)
5224 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
5226 case BIT_AND_EXPR:
5227 case BIT_IOR_EXPR:
5228 case BIT_XOR_EXPR:
5229 tree rhs1, rhs2;
5230 rhs1 = gimple_assign_rhs1 (def_stmt);
5231 rhs2 = gimple_assign_rhs2 (def_stmt);
5232 invalid = true;
5233 if (TREE_CODE (rhs1) != SSA_NAME)
5234 break;
5235 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
5236 if (!is_gimple_assign (def_stmt1)
5237 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
5238 bitregion_start, bitregion_end))
5239 break;
5240 if (rhs_valid_for_store_merging_p (rhs2))
5241 ops[1].val = rhs2;
5242 else if (TREE_CODE (rhs2) != SSA_NAME)
5243 break;
5244 else
5246 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
5247 if (!is_gimple_assign (def_stmt2))
5248 break;
5249 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
5250 bitregion_start, bitregion_end))
5251 break;
5253 invalid = false;
5254 break;
5255 default:
5256 invalid = true;
5257 break;
5260 unsigned HOST_WIDE_INT const_bitsize;
5261 if (bitsize.is_constant (&const_bitsize)
5262 && (const_bitsize % BITS_PER_UNIT) == 0
5263 && const_bitsize <= 64
5264 && multiple_p (bitpos, BITS_PER_UNIT))
5266 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
5267 if (ins_stmt)
5269 uint64_t nn = n.n;
5270 for (unsigned HOST_WIDE_INT i = 0;
5271 i < const_bitsize;
5272 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
5273 if ((nn & MARKER_MASK) == 0
5274 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
5276 ins_stmt = NULL;
5277 break;
5279 if (ins_stmt)
5281 if (invalid)
5283 rhs_code = LROTATE_EXPR;
5284 ops[0].base_addr = NULL_TREE;
5285 ops[1].base_addr = NULL_TREE;
5287 invalid = false;
5292 if (invalid
5293 && bitsize.is_constant (&const_bitsize)
5294 && ((const_bitsize % BITS_PER_UNIT) != 0
5295 || !multiple_p (bitpos, BITS_PER_UNIT))
5296 && const_bitsize <= MAX_FIXED_MODE_SIZE)
5298 /* Bypass a conversion to the bit-field type. */
5299 if (!bit_not_p
5300 && is_gimple_assign (def_stmt)
5301 && CONVERT_EXPR_CODE_P (rhs_code))
5303 tree rhs1 = gimple_assign_rhs1 (def_stmt);
5304 if (TREE_CODE (rhs1) == SSA_NAME
5305 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
5306 rhs = rhs1;
5308 rhs_code = BIT_INSERT_EXPR;
5309 bit_not_p = false;
5310 ops[0].val = rhs;
5311 ops[0].base_addr = NULL_TREE;
5312 ops[1].base_addr = NULL_TREE;
5313 invalid = false;
5316 else
5317 invalid = true;
5319 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
5320 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
5321 if (invalid
5322 || !bitsize.is_constant (&const_bitsize)
5323 || !bitpos.is_constant (&const_bitpos)
5324 || !bitregion_start.is_constant (&const_bitregion_start)
5325 || !bitregion_end.is_constant (&const_bitregion_end))
5326 return terminate_all_aliasing_chains (NULL, stmt);
5328 if (!ins_stmt)
5329 memset (&n, 0, sizeof (n));
5331 class imm_store_chain_info **chain_info = NULL;
5332 bool ret = false;
5333 if (base_addr)
5334 chain_info = m_stores.get (base_addr);
5336 store_immediate_info *info;
5337 if (chain_info)
5339 unsigned int ord = (*chain_info)->m_store_info.length ();
5340 info = new store_immediate_info (const_bitsize, const_bitpos,
5341 const_bitregion_start,
5342 const_bitregion_end,
5343 stmt, ord, rhs_code, n, ins_stmt,
5344 bit_not_p, lp_nr_for_store (stmt),
5345 ops[0], ops[1]);
5346 if (dump_file && (dump_flags & TDF_DETAILS))
5348 fprintf (dump_file, "Recording immediate store from stmt:\n");
5349 print_gimple_stmt (dump_file, stmt, 0);
5351 (*chain_info)->m_store_info.safe_push (info);
5352 m_n_stores++;
5353 ret |= terminate_all_aliasing_chains (chain_info, stmt);
5354 /* If we reach the limit of stores to merge in a chain, terminate and
5355 process the chain now. */
5356 if ((*chain_info)->m_store_info.length ()
5357 == (unsigned int) param_max_stores_to_merge)
5359 if (dump_file && (dump_flags & TDF_DETAILS))
5360 fprintf (dump_file,
5361 "Reached maximum number of statements to merge:\n");
5362 ret |= terminate_and_process_chain (*chain_info);
5365 else
5367 /* Store aliases any existing chain? */
5368 ret |= terminate_all_aliasing_chains (NULL, stmt);
5370 /* Start a new chain. */
5371 class imm_store_chain_info *new_chain
5372 = new imm_store_chain_info (m_stores_head, base_addr);
5373 info = new store_immediate_info (const_bitsize, const_bitpos,
5374 const_bitregion_start,
5375 const_bitregion_end,
5376 stmt, 0, rhs_code, n, ins_stmt,
5377 bit_not_p, lp_nr_for_store (stmt),
5378 ops[0], ops[1]);
5379 new_chain->m_store_info.safe_push (info);
5380 m_n_stores++;
5381 m_stores.put (base_addr, new_chain);
5382 m_n_chains++;
5383 if (dump_file && (dump_flags & TDF_DETAILS))
5385 fprintf (dump_file, "Starting active chain number %u with statement:\n",
5386 m_n_chains);
5387 print_gimple_stmt (dump_file, stmt, 0);
5388 fprintf (dump_file, "The base object is:\n");
5389 print_generic_expr (dump_file, base_addr);
5390 fprintf (dump_file, "\n");
5394 /* Prune oldest chains so that after adding the chain or store above
5395 we're again within the limits set by the params. */
5396 if (m_n_chains > (unsigned)param_max_store_chains_to_track
5397 || m_n_stores > (unsigned)param_max_stores_to_track)
5399 if (dump_file && (dump_flags & TDF_DETAILS))
5400 fprintf (dump_file, "Too many chains (%u > %d) or stores (%u > %d), "
5401 "terminating oldest chain(s).\n", m_n_chains,
5402 param_max_store_chains_to_track, m_n_stores,
5403 param_max_stores_to_track);
5404 imm_store_chain_info **e = &m_stores_head;
5405 unsigned idx = 0;
5406 unsigned n_stores = 0;
5407 while (*e)
5409 if (idx >= (unsigned)param_max_store_chains_to_track
5410 || (n_stores + (*e)->m_store_info.length ()
5411 > (unsigned)param_max_stores_to_track))
5412 ret |= terminate_and_process_chain (*e);
5413 else
5415 n_stores += (*e)->m_store_info.length ();
5416 e = &(*e)->next;
5417 ++idx;
5422 return ret;
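/* Illustrative flow: the first recordable store to a given base starts a
   new imm_store_chain_info keyed on that base in m_stores; later stores to
   the same base are appended to that chain and any chains the store might
   alias are terminated.  A chain reaching param_max_stores_to_merge is
   processed immediately, and exceeding param_max_store_chains_to_track or
   param_max_stores_to_track causes the oldest chains to be terminated.  */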
5425 /* Return true if STMT is a store valid for store merging. */
5427 static bool
5428 store_valid_for_store_merging_p (gimple *stmt)
5430 return gimple_assign_single_p (stmt)
5431 && gimple_vdef (stmt)
5432 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
5433 && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
5436 enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
/* Return the status of basic block BB wrt store merging. */

static enum basic_block_status
get_status_for_store_merging (basic_block bb)
{
  unsigned int num_statements = 0;
  unsigned int num_constructors = 0;
  gimple_stmt_iterator gsi;
  edge e;
  gimple *last_stmt = NULL;

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      last_stmt = stmt;

      if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
	break;

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == CONSTRUCTOR)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  if (VECTOR_TYPE_P (TREE_TYPE (rhs))
	      && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
	      && gimple_assign_lhs (stmt) != NULL_TREE)
	    {
	      HOST_WIDE_INT sz
		= int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
	      if (sz == 16 || sz == 32 || sz == 64)
		{
		  num_constructors = 1;
		  break;
		}
	    }
	}
    }

  if (num_statements == 0 && num_constructors == 0)
    return BB_INVALID;

  if (cfun->can_throw_non_call_exceptions && cfun->eh
      && store_valid_for_store_merging_p (last_stmt)
      && (e = find_fallthru_edge (bb->succs))
      && e->dest == bb->next_bb)
    return BB_EXTENDED_VALID;

  return (num_statements >= 2 || num_constructors) ? BB_VALID : BB_INVALID;
}
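/* For illustration: a CONSTRUCTOR building, say, a 4-element vector of chars
   (32 bits in total) is enough to make the block worth processing, so that
   maybe_optimize_vector_constructor can try to rewrite it as a single wide
   store (see the CONSTRUCTOR case in execute below).  */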
/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the widened
   variants.  */
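/* In outline: chains live within a single basic block, except that a block
   rated BB_EXTENDED_VALID may leave its chains open for its lone fallthrough
   successor to continue; a volatile access (other than a clobber) terminates
   every open chain, while other non-store statements only terminate the
   chains they alias.  */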
unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;
  bool changed = false, open_chains = false;

  /* If the function can throw and catch non-call exceptions, we'll be trying
     to merge stores across different basic blocks so we need to first unsplit
     the EH edges in order to streamline the CFG of the function.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh)
    unsplit_eh_edges ();

  calculate_dominance_info (CDI_DOMINATORS);
  FOR_EACH_BB_FN (bb, fun)
    {
      const basic_block_status bb_status = get_status_for_store_merging (bb);
      gimple_stmt_iterator gsi;

      if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
	{
	  changed |= terminate_and_process_all_chains ();
	  open_chains = false;
	}

      if (bb_status == BB_INVALID)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gsi_next (&gsi);

	  if (is_gimple_debug (stmt))
	    continue;

	  if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
	    {
	      /* Terminate all chains.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Volatile access terminates "
				    "all chains\n");
	      changed |= terminate_and_process_all_chains ();
	      open_chains = false;
	      continue;
	    }

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == CONSTRUCTOR
	      && maybe_optimize_vector_constructor (stmt))
	    continue;

	  if (store_valid_for_store_merging_p (stmt))
	    changed |= process_store (stmt);
	  else
	    changed |= terminate_all_aliasing_chains (NULL, stmt);
	}

      if (bb_status == BB_EXTENDED_VALID)
	open_chains = true;
      else
	{
	  changed |= terminate_and_process_all_chains ();
	  open_chains = false;
	}
    }
  if (open_chains)
    changed |= terminate_and_process_all_chains ();

  /* If the function can throw and catch non-call exceptions and something
     changed during the pass, then the CFG has (very likely) changed too.  */
  if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
    {
      free_dominance_info (CDI_DOMINATORS);
      return TODO_cleanup_cfg;
    }

  return 0;
}
} // anon namespace
/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}
#if CHECKING_P

namespace selftest {

/* Selftests for store merging helpers.  */
/* Assert that all elements of the byte arrays X and Y, both of length N
   are equal.  */

static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
	{
	  fprintf (stderr, "Arrays do not match. X:\n");
	  dump_char_array (stderr, x, n);
	  fprintf (stderr, "Y:\n");
	  dump_char_array (stderr, y, n);
	}
      ASSERT_EQ (x[i], y[i]);
    }
}
/* Test shift_bytes_in_array_left and that it carries bits across between
   bytes correctly.  */
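/* A worked example of the first check below, reading index 0 as the least
   significant byte as the byte 1 | byte 0 diagram does: { 0xe0, 0x1f } is
   the 16-bit value 0x1fe0, and 0x1fe0 << 2 == 0x7f80, which is the expected
   { 0x80, 0x7f }.  */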
static void
verify_shift_bytes_in_array_left (void)
{
  /* byte 1   | byte 0
     00011111 | 11100000. */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array_left (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_left (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
/* Test shift_bytes_in_array_right and that it carries bits across between
   bytes correctly.  */
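/* A worked example of the first check below, here reading index 0 as the
   most significant byte: { 0x1f, 0xe0 } is 0x1fe0, and 0x1fe0 >> 2 ==
   0x07f8, which is the expected { 0x07, 0xf8 }.  */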
static void
verify_shift_bytes_in_array_right (void)
{
  /* byte 1   | byte 0
     00011111 | 11100000. */
  unsigned char orig[2] = { 0x1f, 0xe0};
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
  unsigned char expected[2] = { 0x07, 0xf8};
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
/* Test clear_bit_region that it clears exactly the bits asked and
   nothing more.  */
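/* For the second check below: clearing 3 * BITS_PER_UNIT - 2 bits starting
   at bit 1 of a 24-bit region leaves only bit 0 of byte 0 and bit 7 of
   byte 2 set, hence the expected { 0x01, 0x00, 0x80 }.  */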
static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff};
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}
/* Test clear_bit_region_be that it clears exactly the bits asked and
   nothing more.  */
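/* The BE variant counts bit positions from the most significant end of
   byte 0, so leaving the first and last bits intact below keeps 0x80 in
   byte 0 and 0x01 in byte 2, the mirror image of the test above.  */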
static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff};
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}
/* Run all of the selftests within this file.  */

void
store_merging_cc_tests (void)
{
  verify_shift_bytes_in_array_left ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}
} // namespace selftest
#endif /* CHECKING_P.  */