/*
 * AArch64 specific prctl functions for linux-user
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H

#include "target/arm/cpu-features.h"
#include "mte_user_helper.h"
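
/*
 * Each handler below is exposed to the generic prctl handling in
 * linux-user/syscall.c by the self-referential #define that follows it.
 */

/* PR_SVE_GET_VL: report the current SVE vector length in bytes. */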
static abi_long do_prctl_sve_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sve, cpu)) {
        /* PSTATE.SM is always unset on syscall entry. */
        return sve_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_get_vl do_prctl_sve_get_vl
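
/* PR_SVE_SET_VL: set the SVE vector length for this thread, in bytes. */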
static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the current architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sve, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        uint32_t vq, old_vq;

        /* PSTATE.SM is always unset on syscall entry. */
        old_vq = sve_vq(env);

        /*
         * Bound the value of arg2, so that we know that it fits into
         * the 4-bit field in ZCR_EL1. Rely on the hflags rebuild to
         * sort out the length supported by the cpu.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, ARM_MAX_VQ);
        env->vfp.zcr_el[1] = vq - 1;
        arm_rebuild_hflags(env);

        vq = sve_vq(env);
        if (vq < old_vq) {
            aarch64_sve_narrow_vq(env, vq);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_set_vl do_prctl_sve_set_vl
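
/* PR_SME_GET_VL: report the current SME streaming vector length in bytes. */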
static abi_long do_prctl_sme_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sme, cpu)) {
        return sme_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_get_vl do_prctl_sme_get_vl
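
/* PR_SME_SET_VL: set the SME streaming vector length (in bytes). */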
static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        int vq, old_vq;

        old_vq = sme_vq(env);

        /*
         * Bound the value of vq, so that we know that it fits into
         * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared
         * on syscall entry, we are not modifying the current SVE
         * vector length.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, 16);
        env->vfp.smcr_el[1] =
            FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);

        /* Delay rebuilding hflags until we know if ZA must change. */
        vq = sve_vqm1_for_el_sm(env, 0, true) + 1;

        if (vq != old_vq) {
            /*
             * PSTATE.ZA state is cleared on any change to SVL.
             * We need not call arm_rebuild_hflags because PSTATE.SM was
             * cleared on syscall entry, so this hasn't changed VL.
             */
            env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
            arm_rebuild_hflags(env);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_set_vl do_prctl_sme_set_vl
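
/* PR_PAC_RESET_KEYS: regenerate the pointer authentication keys in arg2. */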
static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
{
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_pauth, cpu)) {
        int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
                   PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
        int ret = 0;
        Error *err = NULL;

        if (arg2 == 0) {
            arg2 = all;
        } else if (arg2 & ~all) {
            return -TARGET_EINVAL;
        }
        if (arg2 & PR_PAC_APIAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apia,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APIBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apib,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apda,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apdb,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APGAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apga,
                                        sizeof(ARMPACKey), &err);
        }
        if (ret != 0) {
            /*
             * Some unknown failure in the crypto. The best
             * we can do is log it and fail the syscall.
             * The real syscall cannot fail this way.
             */
            qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s",
                          error_get_pretty(err));
            error_free(err);
            return -TARGET_EIO;
        }
        return 0;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_reset_keys do_prctl_reset_keys
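
/* PR_SET_TAGGED_ADDR_CTRL: set the tagged address ABI and MTE controls. */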
static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
{
    abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_mte, cpu)) {
        valid_mask |= PR_MTE_TCF_MASK;
        valid_mask |= PR_MTE_TAG_MASK;
    }

    if (arg2 & ~valid_mask) {
        return -TARGET_EINVAL;
    }
    env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;

    if (cpu_isar_feature(aa64_mte, cpu)) {
        arm_set_mte_tcf0(env, arg2);

        /*
         * Write PR_MTE_TAG to GCR_EL1[Exclude].
         * Note that the syscall uses an include mask,
         * and hardware uses an exclude mask -- invert.
         */
        env->cp15.gcr_el1 =
            deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
        arm_rebuild_hflags(env);
    }
    return 0;
}
#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl
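
/* PR_GET_TAGGED_ADDR_CTRL: report the tagged address ABI and MTE state. */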
static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    abi_long ret = 0;

    if (env->tagged_addr_enable) {
        ret |= PR_TAGGED_ADDR_ENABLE;
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        /* See do_prctl_set_tagged_addr_ctrl: SCTLR_EL1.TCF0 is bits [39:38]. */
        ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
        ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
    }
    return ret;
}
#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl

#endif /* AARCH64_TARGET_PRCTL_H */