Add assembler CFI directives to millicode division and remainder routines.
[official-gcc.git] / gcc / config / arm / aarch-common.cc
blob 5b96ff4c2e89722a2cad686bd78ce12721a02010

/* Dependency checks for instruction scheduling, shared between ARM and
   AARCH64.

   Copyright (C) 1991-2023 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "insn-modes.h"
#include "tm.h"
#include "rtl.h"
#include "rtl-iter.h"
#include "memmodel.h"
#include "diagnostic.h"
#include "tree.h"
#include "expr.h"
#include "function.h"
#include "emit-rtl.h"
#include "aarch-common.h"

/* Return TRUE if X is either an arithmetic shift left, or
   is a multiplication by a power of two.  */
bool
arm_rtx_shift_left_p (rtx x)
{
  enum rtx_code code = GET_CODE (x);

  if (code == MULT && CONST_INT_P (XEXP (x, 1))
      && exact_log2 (INTVAL (XEXP (x, 1))) > 0)
    return true;

  if (code == ASHIFT)
    return true;

  return false;
}
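
/* For example, (ashift:SI (reg:SI 1) (const_int 2)) and its canonical
   multiply form (mult:SI (reg:SI 1) (const_int 4)) both count as left
   shifts here (illustrative RTL; register numbers are arbitrary).  */
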
static rtx_code shift_rtx_codes[] =
  { ASHIFT, ROTATE, ASHIFTRT, LSHIFTRT,
    ROTATERT, ZERO_EXTEND, SIGN_EXTEND };

/* Traverse PATTERN looking for a sub-rtx with RTX_CODE CODE.
   If FIND_ANY_SHIFT then we are interested in anything which can
   reasonably be described as a SHIFT RTX.  */
static rtx
arm_find_sub_rtx_with_code (rtx pattern, rtx_code code, bool find_any_shift)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, pattern, NONCONST)
    {
      rtx x = *iter;
      if (find_any_shift)
        {
          /* Left shifts might have been canonicalized to a MULT of some
             power of two.  Make sure we catch them.  */
          if (arm_rtx_shift_left_p (x))
            return x;
          else
            for (unsigned int i = 0; i < ARRAY_SIZE (shift_rtx_codes); i++)
              if (GET_CODE (x) == shift_rtx_codes[i])
                return x;
        }

      if (GET_CODE (x) == code)
        return x;
    }

  return NULL_RTX;
}

/* Traverse PATTERN looking for any sub-rtx which looks like a shift.  */
static rtx
arm_find_shift_sub_rtx (rtx pattern)
{
  return arm_find_sub_rtx_with_code (pattern, ASHIFT, true);
}

/* PRODUCER and CONSUMER are two potentially dependent RTX.  PRODUCER
   (possibly) contains a SET which will provide a result we can access
   using the SET_DEST macro.  We will place the RTX which would be
   written by PRODUCER in SET_SOURCE.
   Similarly, CONSUMER (possibly) contains a SET which has an operand
   we can access using SET_SRC.  We place this operand in
   SET_DESTINATION.

   Return nonzero if we found the SET RTX we expected.  */
static int
arm_get_set_operands (rtx producer, rtx consumer,
                      rtx *set_source, rtx *set_destination)
{
  rtx set_producer = arm_find_sub_rtx_with_code (PATTERN (producer),
                                                 SET, false);
  rtx set_consumer = arm_find_sub_rtx_with_code (PATTERN (consumer),
                                                 SET, false);

  if (set_producer && set_consumer)
    {
      *set_source = SET_DEST (set_producer);
      *set_destination = SET_SRC (set_consumer);
      return 1;
    }

  return 0;
}

bool
aarch_rev16_shright_mask_imm_p (rtx val, machine_mode mode)
{
  return CONST_INT_P (val)
         && INTVAL (val)
            == trunc_int_for_mode (HOST_WIDE_INT_C (0xff00ff00ff00ff),
                                   mode);
}

bool
aarch_rev16_shleft_mask_imm_p (rtx val, machine_mode mode)
{
  return CONST_INT_P (val)
         && INTVAL (val)
            == trunc_int_for_mode (HOST_WIDE_INT_C (0xff00ff00ff00ff00),
                                   mode);
}
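
/* Note: trunc_int_for_mode truncates the constant to MODE and
   sign-extends, so in SImode the masks above become 0x00ff00ff and
   0xff00ff00 (the latter sign-extended, matching how SImode CONST_INTs
   are stored), while in DImode the full 64-bit masks are compared.  */
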
static bool
aarch_rev16_p_1 (rtx lhs, rtx rhs, machine_mode mode)
{
  if (GET_CODE (lhs) == AND
      && GET_CODE (XEXP (lhs, 0)) == ASHIFT
      && CONST_INT_P (XEXP (XEXP (lhs, 0), 1))
      && INTVAL (XEXP (XEXP (lhs, 0), 1)) == 8
      && REG_P (XEXP (XEXP (lhs, 0), 0))
      && CONST_INT_P (XEXP (lhs, 1))
      && GET_CODE (rhs) == AND
      && GET_CODE (XEXP (rhs, 0)) == LSHIFTRT
      && REG_P (XEXP (XEXP (rhs, 0), 0))
      && CONST_INT_P (XEXP (XEXP (rhs, 0), 1))
      && INTVAL (XEXP (XEXP (rhs, 0), 1)) == 8
      && CONST_INT_P (XEXP (rhs, 1))
      && REGNO (XEXP (XEXP (rhs, 0), 0)) == REGNO (XEXP (XEXP (lhs, 0), 0)))
    {
      rtx lhs_mask = XEXP (lhs, 1);
      rtx rhs_mask = XEXP (rhs, 1);

      return aarch_rev16_shright_mask_imm_p (rhs_mask, mode)
             && aarch_rev16_shleft_mask_imm_p (lhs_mask, mode);
    }

  return false;
}

/* Recognise a sequence of bitwise operations corresponding to a rev16 operation.
   These will be of the form:
     ((x >> 8) & 0x00ff00ff)
     | ((x << 8) & 0xff00ff00)
   for SImode and with similar but wider bitmasks for DImode.
   The two sub-expressions of the IOR can appear on either side so check both
   permutations with the help of aarch_rev16_p_1 above.  */

bool
aarch_rev16_p (rtx x)
{
  rtx left_sub_rtx, right_sub_rtx;
  bool is_rev = false;

  if (GET_CODE (x) != IOR)
    return false;

  left_sub_rtx = XEXP (x, 0);
  right_sub_rtx = XEXP (x, 1);

  /* There are no canonicalisation rules for the position of the two shifts
     involved in a rev, so try both permutations.  */
  is_rev = aarch_rev16_p_1 (left_sub_rtx, right_sub_rtx, GET_MODE (x));

  if (!is_rev)
    is_rev = aarch_rev16_p_1 (right_sub_rtx, left_sub_rtx, GET_MODE (x));

  return is_rev;
}
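
/* As an illustration, C source of the form

     unsigned int
     swap_halfwords (unsigned int x)
     {
       return ((x >> 8) & 0x00ff00ff) | ((x << 8) & 0xff00ff00);
     }

   combines into exactly this IOR shape and can then be emitted as a
   single rev16 instruction (hypothetical function name; the pattern is
   taken from the comment above).  */
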
/* Return non-zero if the RTX representing a memory model is a memory model
   that needs acquire semantics.  */
bool
aarch_mm_needs_acquire (rtx const_int)
{
  enum memmodel model = memmodel_from_int (INTVAL (const_int));
  return !(is_mm_relaxed (model)
           || is_mm_consume (model)
           || is_mm_release (model));
}

/* Return non-zero if the RTX representing a memory model is a memory model
   that needs release semantics.  */
bool
aarch_mm_needs_release (rtx const_int)
{
  enum memmodel model = memmodel_from_int (INTVAL (const_int));
  return !(is_mm_relaxed (model)
           || is_mm_consume (model)
           || is_mm_acquire (model));
}
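
/* As a worked example: the models behind __ATOMIC_ACQUIRE,
   __ATOMIC_ACQ_REL and __ATOMIC_SEQ_CST need acquire semantics, while
   __ATOMIC_RELEASE, __ATOMIC_ACQ_REL and __ATOMIC_SEQ_CST need release
   semantics; by these predicates, relaxed and consume need neither.  */
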
/* Return nonzero if the CONSUMER instruction (a load) does need
   PRODUCER's value to calculate the address.  */
int
arm_early_load_addr_dep (rtx producer, rtx consumer)
{
  rtx value, addr;

  if (!arm_get_set_operands (producer, consumer, &value, &addr))
    return 0;

  return reg_overlap_mentioned_p (value, addr);
}

/* Return nonzero if the CONSUMER instruction (a load) does need
   a Pmode PRODUCER's value to calculate the address.  */
int
arm_early_load_addr_dep_ptr (rtx producer, rtx consumer)
{
  rtx value = arm_find_sub_rtx_with_code (PATTERN (producer), SET, false);
  rtx addr = arm_find_sub_rtx_with_code (PATTERN (consumer), SET, false);

  if (!value || !addr || !MEM_P (SET_SRC (value)))
    return 0;

  value = SET_DEST (value);
  addr = SET_SRC (addr);

  return GET_MODE (value) == Pmode && reg_overlap_mentioned_p (value, addr);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */
int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value, op;
  rtx early_op;

  if (!arm_get_set_operands (producer, consumer, &value, &op))
    return 0;

  if ((early_op = arm_find_shift_sub_rtx (op)))
    return !reg_overlap_mentioned_p (value, early_op);

  return 0;
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */
int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value, op;
  rtx early_op;

  if (!arm_get_set_operands (producer, consumer, &value, &op))
    return 0;

  if ((early_op = arm_find_shift_sub_rtx (op)))
    /* We want to check the value being shifted.  */
    if (!reg_overlap_mentioned_p (value, XEXP (early_op, 0)))
      return 1;

  return 0;
}

/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */
int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value, op;

  if (!arm_get_set_operands (producer, consumer, &value, &op))
    return 0;

  if (GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
    {
      if (GET_CODE (XEXP (op, 0)) == MULT)
        return !reg_overlap_mentioned_p (value, XEXP (op, 0));
      else
        return !reg_overlap_mentioned_p (value, XEXP (op, 1));
    }

  return 0;
}

/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */
int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = arm_find_sub_rtx_with_code (PATTERN (producer), SET, false);
  rtx addr = arm_find_sub_rtx_with_code (PATTERN (consumer), SET, false);

  if (value)
    value = SET_DEST (value);

  if (addr)
    addr = SET_DEST (addr);

  if (!value || !addr)
    return 0;

  return !reg_overlap_mentioned_p (value, addr);
}

/* Return nonzero if the CONSUMER instruction (a store) does need
   PRODUCER's value to calculate the address.  */
int
arm_early_store_addr_dep (rtx producer, rtx consumer)
{
  return !arm_no_early_store_addr_dep (producer, consumer);
}

/* Return nonzero if the CONSUMER instruction (a store) does need
   a Pmode PRODUCER's value to calculate the address.  */
int
arm_early_store_addr_dep_ptr (rtx producer, rtx consumer)
{
  rtx value = arm_find_sub_rtx_with_code (PATTERN (producer), SET, false);
  rtx addr = arm_find_sub_rtx_with_code (PATTERN (consumer), SET, false);

  if (!value || !addr || !MEM_P (SET_SRC (value)))
    return 0;

  value = SET_DEST (value);
  addr = SET_DEST (addr);

  return GET_MODE (value) == Pmode && reg_overlap_mentioned_p (value, addr);
}

/* Return non-zero iff the consumer (a multiply-accumulate or a
   multiply-subtract instruction) has an accumulator dependency on the
   result of the producer and no other dependency on that result.  It
   does not check if the producer is a multiply-accumulate instruction.  */
int
arm_mac_accumulator_is_result (rtx producer, rtx consumer)
{
  rtx result;
  rtx op0, op1, acc;

  producer = PATTERN (producer);
  consumer = PATTERN (consumer);

  if (GET_CODE (producer) == COND_EXEC)
    producer = COND_EXEC_CODE (producer);
  if (GET_CODE (consumer) == COND_EXEC)
    consumer = COND_EXEC_CODE (consumer);

  if (GET_CODE (producer) != SET)
    return 0;

  result = XEXP (producer, 0);

  if (GET_CODE (consumer) != SET)
    return 0;

  /* Check that the consumer is of the form
       (set (...) (plus (mult ...) (...)))
     or
       (set (...) (minus (...) (mult ...))).  */
  if (GET_CODE (XEXP (consumer, 1)) == PLUS)
    {
      if (GET_CODE (XEXP (XEXP (consumer, 1), 0)) != MULT)
        return 0;

      op0 = XEXP (XEXP (XEXP (consumer, 1), 0), 0);
      op1 = XEXP (XEXP (XEXP (consumer, 1), 0), 1);
      acc = XEXP (XEXP (consumer, 1), 1);
    }
  else if (GET_CODE (XEXP (consumer, 1)) == MINUS)
    {
      if (GET_CODE (XEXP (XEXP (consumer, 1), 1)) != MULT)
        return 0;

      op0 = XEXP (XEXP (XEXP (consumer, 1), 1), 0);
      op1 = XEXP (XEXP (XEXP (consumer, 1), 1), 1);
      acc = XEXP (XEXP (consumer, 1), 0);
    }
  else
    return 0;

  return (reg_overlap_mentioned_p (result, acc)
          && !reg_overlap_mentioned_p (result, op0)
          && !reg_overlap_mentioned_p (result, op1));
}
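
/* For example, given a producer
     (set (reg:SI 0) (...))
   and a consumer
     (set (reg:SI 4) (plus:SI (mult:SI (reg:SI 1) (reg:SI 2))
                              (reg:SI 0)))
   the produced value feeds only the accumulator operand, so the
   function above returns nonzero (illustrative RTL; register numbers
   are arbitrary).  */
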
/* Return non-zero if the destination of PRODUCER feeds the accumulator
   operand of an MLA-like operation.  */
int
aarch_accumulator_forwarding (rtx_insn *producer, rtx_insn *consumer)
{
  rtx producer_set = single_set (producer);
  rtx consumer_set = single_set (consumer);

  /* We are looking for a SET feeding a SET.  */
  if (!producer_set || !consumer_set)
    return 0;

  rtx dest = SET_DEST (producer_set);
  rtx mla = SET_SRC (consumer_set);

  /* We're looking for a register SET.  */
  if (!REG_P (dest))
    return 0;

  rtx accumulator;

  /* Strip a zero_extend.  */
  if (GET_CODE (mla) == ZERO_EXTEND)
    mla = XEXP (mla, 0);

  switch (GET_CODE (mla))
    {
    case PLUS:
      /* Possibly an MADD.  */
      if (GET_CODE (XEXP (mla, 0)) == MULT)
        accumulator = XEXP (mla, 1);
      else
        return 0;
      break;
    case MINUS:
      /* Possibly an MSUB.  */
      if (GET_CODE (XEXP (mla, 1)) == MULT)
        accumulator = XEXP (mla, 0);
      else
        return 0;
      break;
    case FMA:
      {
        /* Possibly an FMADD/FMSUB/FNMADD/FNMSUB.  */
        if (REG_P (XEXP (mla, 1))
            && REG_P (XEXP (mla, 2))
            && (REG_P (XEXP (mla, 0))
                || GET_CODE (XEXP (mla, 0)) == NEG))
          {
            /* FMADD/FMSUB.  */
            accumulator = XEXP (mla, 2);
          }
        else if (REG_P (XEXP (mla, 1))
                 && GET_CODE (XEXP (mla, 2)) == NEG
                 && (REG_P (XEXP (mla, 0))
                     || GET_CODE (XEXP (mla, 0)) == NEG))
          {
            /* FNMADD/FNMSUB.  */
            accumulator = XEXP (XEXP (mla, 2), 0);
          }
        else
          return 0;
        break;
      }
    default:
      /* Not an MLA-like operation.  */
      return 0;
    }

  if (SUBREG_P (accumulator))
    accumulator = SUBREG_REG (accumulator);

  if (!REG_P (accumulator))
    return 0;

  return (REGNO (dest) == REGNO (accumulator));
}

/* Return non-zero if the consumer (a multiply-accumulate instruction)
   has an accumulator dependency on the result of the producer (a
   multiplication instruction) and no other dependency on that result.  */
int
arm_mac_accumulator_is_mul_result (rtx producer, rtx consumer)
{
  rtx mul = PATTERN (producer);
  rtx mac = PATTERN (consumer);
  rtx mul_result;
  rtx mac_op0, mac_op1, mac_acc;

  if (GET_CODE (mul) == COND_EXEC)
    mul = COND_EXEC_CODE (mul);
  if (GET_CODE (mac) == COND_EXEC)
    mac = COND_EXEC_CODE (mac);

  /* Check that mul is of the form (set (...) (mult ...))
     and mla is of the form (set (...) (plus (mult ...) (...))).  */
  if ((GET_CODE (mul) != SET || GET_CODE (XEXP (mul, 1)) != MULT)
      || (GET_CODE (mac) != SET || GET_CODE (XEXP (mac, 1)) != PLUS
          || GET_CODE (XEXP (XEXP (mac, 1), 0)) != MULT))
    return 0;

  mul_result = XEXP (mul, 0);
  mac_op0 = XEXP (XEXP (XEXP (mac, 1), 0), 0);
  mac_op1 = XEXP (XEXP (XEXP (mac, 1), 0), 1);
  mac_acc = XEXP (XEXP (mac, 1), 1);

  return (reg_overlap_mentioned_p (mul_result, mac_acc)
          && !reg_overlap_mentioned_p (mul_result, mac_op0)
          && !reg_overlap_mentioned_p (mul_result, mac_op1));
}

/* Worker function for TARGET_MD_ASM_ADJUST.
   We implement asm flag outputs.  */

rtx_insn *
arm_md_asm_adjust (vec<rtx> &outputs, vec<rtx> & /*inputs*/,
                   vec<machine_mode> & /*input_modes*/,
                   vec<const char *> &constraints, vec<rtx> & /*clobbers*/,
                   HARD_REG_SET & /*clobbered_regs*/, location_t loc)
{
  bool saw_asm_flag = false;

  start_sequence ();
  for (unsigned i = 0, n = outputs.length (); i < n; ++i)
    {
      const char *con = constraints[i];
      if (!startswith (con, "=@cc"))
        continue;
      con += 4;
      if (strchr (con, ',') != NULL)
        {
          error_at (loc, "alternatives not allowed in %<asm%> flag output");
          continue;
        }

      machine_mode mode;
      rtx_code code;
      int con01 = 0;

#define C(X, Y)  (unsigned char)(X) * 256 + (unsigned char)(Y)

      /* All of the condition codes are two characters.  */
      if (con[0] != 0 && con[1] != 0 && con[2] == 0)
        con01 = C(con[0], con[1]);

      switch (con01)
        {
        case C('c', 'c'):
        case C('l', 'o'):
          mode = CC_Cmode, code = GEU;
          break;
        case C('c', 's'):
        case C('h', 's'):
          mode = CC_Cmode, code = LTU;
          break;
        case C('e', 'q'):
          mode = CC_NZmode, code = EQ;
          break;
        case C('g', 'e'):
          mode = CCmode, code = GE;
          break;
        case C('g', 't'):
          mode = CCmode, code = GT;
          break;
        case C('h', 'i'):
          mode = CCmode, code = GTU;
          break;
        case C('l', 'e'):
          mode = CCmode, code = LE;
          break;
        case C('l', 's'):
          mode = CCmode, code = LEU;
          break;
        case C('l', 't'):
          mode = CCmode, code = LT;
          break;
        case C('m', 'i'):
          mode = CC_NZmode, code = LT;
          break;
        case C('n', 'e'):
          mode = CC_NZmode, code = NE;
          break;
        case C('p', 'l'):
          mode = CC_NZmode, code = GE;
          break;
        case C('v', 'c'):
          mode = CC_Vmode, code = EQ;
          break;
        case C('v', 's'):
          mode = CC_Vmode, code = NE;
          break;
        default:
          error_at (loc, "unknown %<asm%> flag output %qs", constraints[i]);
          continue;
        }
#undef C

      rtx dest = outputs[i];
      machine_mode dest_mode = GET_MODE (dest);
      if (!SCALAR_INT_MODE_P (dest_mode))
        {
          error_at (loc, "invalid type for %<asm%> flag output");
          continue;
        }

      if (!saw_asm_flag)
        {
          /* This is the first asm flag output.  Here we put the flags
             register in as the real output and adjust the condition to
             allow it.  */
          constraints[i] = "=c";
          outputs[i] = gen_rtx_REG (CCmode, CC_REGNUM);
          saw_asm_flag = true;
        }
      else
        {
          /* We don't need the flags register as output twice.  */
          constraints[i] = "=X";
          outputs[i] = gen_rtx_SCRATCH (word_mode);
        }

      rtx x = gen_rtx_REG (mode, CC_REGNUM);
      x = gen_rtx_fmt_ee (code, word_mode, x, const0_rtx);

      if (dest_mode == word_mode && REG_P (dest))
        emit_insn (gen_rtx_SET (dest, x));
      else
        {
          rtx tmp = gen_reg_rtx (word_mode);
          emit_insn (gen_rtx_SET (tmp, x));

          tmp = convert_modes (dest_mode, word_mode, tmp, true);
          emit_move_insn (dest, tmp);
        }
    }

  rtx_insn *seq = get_insns ();
  end_sequence ();

  return saw_asm_flag ? seq : NULL;
}
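
/* A minimal user-level sketch of the flag outputs handled above
   (hypothetical variable names):

     int eq;
     unsigned int a, b;
     asm ("cmp %1, %2" : "=@cceq" (eq) : "r" (a), "r" (b));

   After arm_md_asm_adjust runs, EQ receives the Z flag as a 0/1 value
   read back from the CC register.  */
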
#define BRANCH_PROTECT_STR_MAX 255
extern char *accepted_branch_protection_string;

static enum aarch_parse_opt_result
aarch_handle_no_branch_protection (char* str, char* rest)
{
  aarch_ra_sign_scope = AARCH_FUNCTION_NONE;
  aarch_enable_bti = 0;
  if (rest)
    {
      error ("unexpected %<%s%> after %<%s%>", rest, str);
      return AARCH_PARSE_INVALID_FEATURE;
    }
  return AARCH_PARSE_OK;
}

static enum aarch_parse_opt_result
aarch_handle_standard_branch_protection (char* str, char* rest)
{
  aarch_ra_sign_scope = AARCH_FUNCTION_NON_LEAF;
  aarch_ra_sign_key = AARCH_KEY_A;
  aarch_enable_bti = 1;
  if (rest)
    {
      error ("unexpected %<%s%> after %<%s%>", rest, str);
      return AARCH_PARSE_INVALID_FEATURE;
    }
  return AARCH_PARSE_OK;
}

static enum aarch_parse_opt_result
aarch_handle_pac_ret_protection (char* str ATTRIBUTE_UNUSED,
                                 char* rest ATTRIBUTE_UNUSED)
{
  aarch_ra_sign_scope = AARCH_FUNCTION_NON_LEAF;
  aarch_ra_sign_key = AARCH_KEY_A;
  return AARCH_PARSE_OK;
}

static enum aarch_parse_opt_result
aarch_handle_pac_ret_leaf (char* str ATTRIBUTE_UNUSED,
                           char* rest ATTRIBUTE_UNUSED)
{
  aarch_ra_sign_scope = AARCH_FUNCTION_ALL;
  return AARCH_PARSE_OK;
}

static enum aarch_parse_opt_result
aarch_handle_pac_ret_b_key (char* str ATTRIBUTE_UNUSED,
                            char* rest ATTRIBUTE_UNUSED)
{
  aarch_ra_sign_key = AARCH_KEY_B;
  return AARCH_PARSE_OK;
}

static enum aarch_parse_opt_result
aarch_handle_bti_protection (char* str ATTRIBUTE_UNUSED,
                             char* rest ATTRIBUTE_UNUSED)
{
  aarch_enable_bti = 1;
  return AARCH_PARSE_OK;
}

static const struct aarch_branch_protect_type aarch_pac_ret_subtypes[] = {
  { "leaf", aarch_handle_pac_ret_leaf, NULL, 0 },
  { "b-key", aarch_handle_pac_ret_b_key, NULL, 0 },
  { NULL, NULL, NULL, 0 }
};

static const struct aarch_branch_protect_type aarch_branch_protect_types[] = {
  { "none", aarch_handle_no_branch_protection, NULL, 0 },
  { "standard", aarch_handle_standard_branch_protection, NULL, 0 },
  { "pac-ret", aarch_handle_pac_ret_protection, aarch_pac_ret_subtypes,
    ARRAY_SIZE (aarch_pac_ret_subtypes) },
  { "bti", aarch_handle_bti_protection, NULL, 0 },
  { NULL, NULL, NULL, 0 }
};

/* Parses CONST_STR for branch protection features specified in
   aarch_branch_protect_types, and sets any global variables required.
   Returns the parsing result and assigns LAST_STR to the last processed
   token from CONST_STR so that it can be used for error reporting.  */

enum aarch_parse_opt_result
aarch_parse_branch_protection (const char *const_str, char** last_str)
{
  char *str_root = xstrdup (const_str);
  char* token_save = NULL;
  char *str = strtok_r (str_root, "+", &token_save);
  enum aarch_parse_opt_result res = AARCH_PARSE_OK;
  if (!str)
    res = AARCH_PARSE_MISSING_ARG;
  else
    {
      char *next_str = strtok_r (NULL, "+", &token_save);
      /* Reset the branch protection features to their defaults.  */
      aarch_handle_no_branch_protection (NULL, NULL);

      while (str && res == AARCH_PARSE_OK)
        {
          const aarch_branch_protect_type* type = aarch_branch_protect_types;
          bool found = false;
          /* Search for this type.  */
          while (type && type->name && !found && res == AARCH_PARSE_OK)
            {
              if (strcmp (str, type->name) == 0)
                {
                  found = true;
                  res = type->handler (str, next_str);
                  str = next_str;
                  next_str = strtok_r (NULL, "+", &token_save);
                }
              else
                type++;
            }
          if (found && res == AARCH_PARSE_OK)
            {
              bool found_subtype = true;
              /* Loop through each token until we find one that isn't a
                 subtype.  */
              while (found_subtype)
                {
                  found_subtype = false;
                  const aarch_branch_protect_type *subtype = type->subtypes;
                  /* Search for the subtype.  */
                  while (str && subtype && subtype->name && !found_subtype
                         && res == AARCH_PARSE_OK)
                    {
                      if (strcmp (str, subtype->name) == 0)
                        {
                          found_subtype = true;
                          res = subtype->handler (str, next_str);
                          str = next_str;
                          next_str = strtok_r (NULL, "+", &token_save);
                        }
                      else
                        subtype++;
                    }
                }
            }
          else if (!found)
            res = AARCH_PARSE_INVALID_ARG;
        }
    }

  /* Copy the last processed token into the argument to pass it back.
     Used by option and attribute validation to print the offending token.  */
  if (last_str)
    {
      if (str)
        strcpy (*last_str, str);
      else
        *last_str = NULL;
    }

  if (res == AARCH_PARSE_OK)
    {
      /* If needed, alloc the accepted string then copy in const_str.
         Used by override_option_after_change_1.  */
      if (!accepted_branch_protection_string)
        accepted_branch_protection_string
          = (char *) xmalloc (BRANCH_PROTECT_STR_MAX + 1);
      strncpy (accepted_branch_protection_string, const_str,
               BRANCH_PROTECT_STR_MAX + 1);
      /* Forcibly null-terminate.  */
      accepted_branch_protection_string[BRANCH_PROTECT_STR_MAX] = '\0';
    }

  return res;
}
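
/* For example, "pac-ret+leaf+bti" is tokenised at each '+': "pac-ret"
   matches a top-level entry in aarch_branch_protect_types, "leaf" is
   then consumed as one of its subtypes, and "bti" finally matches
   another top-level entry (an illustrative walk through the parser
   above).  */
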
bool
aarch_validate_mbranch_protection (const char *const_str)
{
  /* A token can be as long as the whole argument, so allow space for
     it plus its terminating NUL.  */
  char *str = (char *) xmalloc (strlen (const_str) + 1);
  enum aarch_parse_opt_result res =
    aarch_parse_branch_protection (const_str, &str);
  if (res == AARCH_PARSE_INVALID_ARG)
    error ("invalid argument %<%s%> for %<-mbranch-protection=%>", str);
  else if (res == AARCH_PARSE_MISSING_ARG)
    error ("missing argument for %<-mbranch-protection=%>");
  free (str);
  return res == AARCH_PARSE_OK;
}