1 /* Initialize CPU feature data. AArch64 version.
2 This file is part of the GNU C Library.
3 Copyright (C) 2017-2023 Free Software Foundation, Inc.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
19 #include <cpu-features.h>
21 #include <elf/dl-hwcaps.h>
22 #include <sys/prctl.h>
/* Fields of DCZID_EL0 consulted below: DZP (bit 4) is set when the
   DC ZVA instruction may not be used; BS (bits 0-3) encodes the block
   size zeroed by DC ZVA — the code below derives it as 4 << BS bytes.  */
#define DCZID_DZP_MASK (1 << 4)
#define DCZID_BS_MASK (0xf)

/* The maximal set of permitted tags that the MTE random tag generation
   instruction may use.  We exclude tag 0 because a) we want to reserve
   that for the libc heap structures and b) because it makes it easier
   to see when pointers have been correctly tagged.  */
#define MTE_ALLOWED_TAGS (0xfffe << PR_MTE_TAG_SHIFT)
40 static struct cpu_list cpu_list
[] = {
41 {"falkor", 0x510FC000},
42 {"thunderxt88", 0x430F0A10},
43 {"thunderx2t99", 0x431F0AF0},
44 {"thunderx2t99p1", 0x420F5160},
45 {"phecda", 0x680F0000},
48 {"kunpeng920", 0x481FD010},
49 {"a64fx", 0x460F0010},
54 get_midr_from_mcpu (const char *mcpu
)
56 for (int i
= 0; i
< sizeof (cpu_list
) / sizeof (struct cpu_list
); i
++)
57 if (strcmp (mcpu
, cpu_list
[i
].name
) == 0)
58 return cpu_list
[i
].midr
;
65 init_cpu_features (struct cpu_features
*cpu_features
)
67 register uint64_t midr
= UINT64_MAX
;
70 /* Get the tunable override. */
71 const char *mcpu
= TUNABLE_GET (glibc
, cpu
, name
, const char *, NULL
);
73 midr
= get_midr_from_mcpu (mcpu
);
76 /* If there was no useful tunable override, query the MIDR if the kernel
78 if (midr
== UINT64_MAX
)
80 if (GLRO (dl_hwcap
) & HWCAP_CPUID
)
81 asm volatile ("mrs %0, midr_el1" : "=r"(midr
));
86 cpu_features
->midr_el1
= midr
;
88 /* Check if ZVA is enabled. */
90 asm volatile ("mrs %0, dczid_el0" : "=r"(dczid
));
92 if ((dczid
& DCZID_DZP_MASK
) == 0)
93 cpu_features
->zva_size
= 4 << (dczid
& DCZID_BS_MASK
);
95 /* Check if BTI is supported. */
96 cpu_features
->bti
= GLRO (dl_hwcap2
) & HWCAP2_BTI
;
98 /* Setup memory tagging support if the HW and kernel support it, and if
99 the user has requested it. */
100 cpu_features
->mte_state
= 0;
104 int mte_state
= TUNABLE_GET (glibc
, mem
, tagging
, unsigned, 0);
105 cpu_features
->mte_state
= (GLRO (dl_hwcap2
) & HWCAP2_MTE
) ? mte_state
: 0;
106 /* If we lack the MTE feature, disable the tunable, since it will
107 otherwise cause instructions that won't run on this CPU to be used. */
108 TUNABLE_SET (glibc
, mem
, tagging
, cpu_features
->mte_state
);
111 if (cpu_features
->mte_state
& 4)
112 /* Enable choosing system-preferred faulting mode. */
113 __prctl (PR_SET_TAGGED_ADDR_CTRL
,
114 (PR_TAGGED_ADDR_ENABLE
| PR_MTE_TCF_SYNC
| PR_MTE_TCF_ASYNC
117 else if (cpu_features
->mte_state
& 2)
118 __prctl (PR_SET_TAGGED_ADDR_CTRL
,
119 (PR_TAGGED_ADDR_ENABLE
| PR_MTE_TCF_SYNC
| MTE_ALLOWED_TAGS
),
121 else if (cpu_features
->mte_state
)
122 __prctl (PR_SET_TAGGED_ADDR_CTRL
,
123 (PR_TAGGED_ADDR_ENABLE
| PR_MTE_TCF_ASYNC
| MTE_ALLOWED_TAGS
),
127 /* Check if SVE is supported. */
128 cpu_features
->sve
= GLRO (dl_hwcap
) & HWCAP_SVE
;