/*
 * AArch64 specific prctl functions for linux-user
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H

#include "target/arm/cpu-features.h"
11 static abi_long
do_prctl_sve_get_vl(CPUArchState
*env
)
13 ARMCPU
*cpu
= env_archcpu(env
);
14 if (cpu_isar_feature(aa64_sve
, cpu
)) {
15 /* PSTATE.SM is always unset on syscall entry. */
16 return sve_vq(env
) * 16;
18 return -TARGET_EINVAL
;
20 #define do_prctl_sve_get_vl do_prctl_sve_get_vl
22 static abi_long
do_prctl_sve_set_vl(CPUArchState
*env
, abi_long arg2
)
25 * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
26 * Note the kernel definition of sve_vl_valid allows for VQ=512,
27 * i.e. VL=8192, even though the current architectural maximum is VQ=16.
29 if (cpu_isar_feature(aa64_sve
, env_archcpu(env
))
30 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
33 /* PSTATE.SM is always unset on syscall entry. */
37 * Bound the value of arg2, so that we know that it fits into
38 * the 4-bit field in ZCR_EL1. Rely on the hflags rebuild to
39 * sort out the length supported by the cpu.
41 vq
= MAX(arg2
/ 16, 1);
42 vq
= MIN(vq
, ARM_MAX_VQ
);
43 env
->vfp
.zcr_el
[1] = vq
- 1;
44 arm_rebuild_hflags(env
);
48 aarch64_sve_narrow_vq(env
, vq
);
52 return -TARGET_EINVAL
;
54 #define do_prctl_sve_set_vl do_prctl_sve_set_vl
56 static abi_long
do_prctl_sme_get_vl(CPUArchState
*env
)
58 ARMCPU
*cpu
= env_archcpu(env
);
59 if (cpu_isar_feature(aa64_sme
, cpu
)) {
60 return sme_vq(env
) * 16;
62 return -TARGET_EINVAL
;
64 #define do_prctl_sme_get_vl do_prctl_sme_get_vl
66 static abi_long
do_prctl_sme_set_vl(CPUArchState
*env
, abi_long arg2
)
69 * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
70 * Note the kernel definition of sve_vl_valid allows for VQ=512,
71 * i.e. VL=8192, even though the architectural maximum is VQ=16.
73 if (cpu_isar_feature(aa64_sme
, env_archcpu(env
))
74 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
80 * Bound the value of vq, so that we know that it fits into
81 * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared
82 * on syscall entry, we are not modifying the current SVE
85 vq
= MAX(arg2
/ 16, 1);
88 FIELD_DP64(env
->vfp
.smcr_el
[1], SMCR
, LEN
, vq
- 1);
90 /* Delay rebuilding hflags until we know if ZA must change. */
91 vq
= sve_vqm1_for_el_sm(env
, 0, true) + 1;
95 * PSTATE.ZA state is cleared on any change to SVL.
96 * We need not call arm_rebuild_hflags because PSTATE.SM was
97 * cleared on syscall entry, so this hasn't changed VL.
99 env
->svcr
= FIELD_DP64(env
->svcr
, SVCR
, ZA
, 0);
100 arm_rebuild_hflags(env
);
104 return -TARGET_EINVAL
;
106 #define do_prctl_sme_set_vl do_prctl_sme_set_vl
108 static abi_long
do_prctl_reset_keys(CPUArchState
*env
, abi_long arg2
)
110 ARMCPU
*cpu
= env_archcpu(env
);
112 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
113 int all
= (PR_PAC_APIAKEY
| PR_PAC_APIBKEY
|
114 PR_PAC_APDAKEY
| PR_PAC_APDBKEY
| PR_PAC_APGAKEY
);
120 } else if (arg2
& ~all
) {
121 return -TARGET_EINVAL
;
123 if (arg2
& PR_PAC_APIAKEY
) {
124 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
125 sizeof(ARMPACKey
), &err
);
127 if (arg2
& PR_PAC_APIBKEY
) {
128 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
129 sizeof(ARMPACKey
), &err
);
131 if (arg2
& PR_PAC_APDAKEY
) {
132 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
133 sizeof(ARMPACKey
), &err
);
135 if (arg2
& PR_PAC_APDBKEY
) {
136 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
137 sizeof(ARMPACKey
), &err
);
139 if (arg2
& PR_PAC_APGAKEY
) {
140 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
141 sizeof(ARMPACKey
), &err
);
145 * Some unknown failure in the crypto. The best
146 * we can do is log it and fail the syscall.
147 * The real syscall cannot fail this way.
149 qemu_log_mask(LOG_UNIMP
, "PR_PAC_RESET_KEYS: Crypto failure: %s",
150 error_get_pretty(err
));
156 return -TARGET_EINVAL
;
158 #define do_prctl_reset_keys do_prctl_reset_keys
160 static abi_long
do_prctl_set_tagged_addr_ctrl(CPUArchState
*env
, abi_long arg2
)
162 abi_ulong valid_mask
= PR_TAGGED_ADDR_ENABLE
;
163 ARMCPU
*cpu
= env_archcpu(env
);
165 if (cpu_isar_feature(aa64_mte
, cpu
)) {
166 valid_mask
|= PR_MTE_TCF_MASK
;
167 valid_mask
|= PR_MTE_TAG_MASK
;
170 if (arg2
& ~valid_mask
) {
171 return -TARGET_EINVAL
;
173 env
->tagged_addr_enable
= arg2
& PR_TAGGED_ADDR_ENABLE
;
175 if (cpu_isar_feature(aa64_mte
, cpu
)) {
177 * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
179 * The kernel has a per-cpu configuration for the sysadmin,
180 * /sys/devices/system/cpu/cpu<N>/mte_tcf_preferred,
181 * which qemu does not implement.
183 * Because there is no performance difference between the modes, and
184 * because SYNC is most useful for debugging MTE errors, choose SYNC
185 * as the preferred mode. With this preference, and the way the API
186 * uses only two bits, there is no way for the program to select
190 if (arg2
& PR_MTE_TCF_SYNC
) {
192 } else if (arg2
& PR_MTE_TCF_ASYNC
) {
195 env
->cp15
.sctlr_el
[1] = deposit64(env
->cp15
.sctlr_el
[1], 38, 2, tcf
);
198 * Write PR_MTE_TAG to GCR_EL1[Exclude].
199 * Note that the syscall uses an include mask,
200 * and hardware uses an exclude mask -- invert.
203 deposit64(env
->cp15
.gcr_el1
, 0, 16, ~arg2
>> PR_MTE_TAG_SHIFT
);
204 arm_rebuild_hflags(env
);
208 #define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl
210 static abi_long
do_prctl_get_tagged_addr_ctrl(CPUArchState
*env
)
212 ARMCPU
*cpu
= env_archcpu(env
);
215 if (env
->tagged_addr_enable
) {
216 ret
|= PR_TAGGED_ADDR_ENABLE
;
218 if (cpu_isar_feature(aa64_mte
, cpu
)) {
219 /* See do_prctl_set_tagged_addr_ctrl. */
220 ret
|= extract64(env
->cp15
.sctlr_el
[1], 38, 2) << PR_MTE_TCF_SHIFT
;
221 ret
= deposit64(ret
, PR_MTE_TAG_SHIFT
, 16, ~env
->cp15
.gcr_el1
);
225 #define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
#endif /* AARCH64_TARGET_PRCTL_H */