target/arm: Move expand_pred_b to vec_internal.h
[qemu/ar7.git] / target / arm / vec_internal.h
blobd1a1ea4a668f0af96ae7f0e2d4ed3daa8b082ed7
1 /*
2 * ARM AdvSIMD / SVE Vector Helpers
4 * Copyright (c) 2020 Linaro
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#ifndef TARGET_ARM_VEC_INTERNAL_H
#define TARGET_ARM_VEC_INTERNAL_H

/*
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing units smaller than that needs a host-endian fixup.
 *
 * The H<N> macros are used when indexing an array of elements of size N.
 *
 * The H1_<N> macros are used when performing byte arithmetic and then
 * casting the final pointer to a type of size N.
 */
#if HOST_BIG_ENDIAN
/* Big-endian host: XOR flips the index within a 64-bit chunk. */
#define H1(x) ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x) ((x) ^ 3)
#define H4(x) ((x) ^ 1)
#else
/* Little-endian host: indices need no adjustment. */
#define H1(x) (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x) (x)
#define H4(x) (x)
#endif
/*
 * Access to 64-bit elements isn't host-endian dependent; we provide H8
 * and H1_8 so that when a function is being generated from a macro we
 * can pass these rather than an empty macro argument, for clarity.
 */
#define H8(x) (x)
#define H1_8(x) (x)
/*
 * Expand active predicate bits to bytes, for byte elements.
 * The 256-entry table is indexed by the predicate byte; its contents
 * are defined out of line (see expand_pred_b_data's definition).
 */
extern const uint64_t expand_pred_b_data[256];

static inline uint64_t expand_pred_b(uint8_t byte)
{
    return expand_pred_b_data[byte];
}
/*
 * Zero the bytes of a vector register beyond the operation size.
 *
 * @vd: vector register, stored as host-endian 64-bit chunks
 * @opr_sz: bytes written by the operation; assumed to be a multiple
 *          of 8 -- NOTE(review): confirm all callers guarantee this
 * @max_sz: total size of the register in bytes, also a multiple of 8
 */
static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
{
    /*
     * Cast via char * before the byte offset: arithmetic directly on a
     * void * is a GNU extension, not valid ISO C.
     */
    uint64_t *d = (uint64_t *)((char *)vd + opr_sz);
    uintptr_t i;

    for (i = opr_sz; i < max_sz; i += 8) {
        *d++ = 0;
    }
}
/*
 * Signed saturating rounding shift left, for 8/16/32-bit elements
 * held in an int32_t.
 *
 * @src: value to shift
 * @shift: shift amount; negative shifts right
 * @bits: element width (8, 16 or 32)
 * @round: if true, round right shifts to nearest
 * @sat: if non-NULL, set to 1 on saturation; if NULL, overflow wraps
 *       and out-of-range results become 0
 */
static inline int32_t do_sqrshl_bhs(int32_t src, int32_t shift, int bits,
                                    bool round, uint32_t *sat)
{
    if (shift <= -bits) {
        /* Everything shifted out; rounding the sign bit yields 0. */
        return round ? 0 : src >> 31;
    }
    if (shift < 0) {
        if (round) {
            /* Shift one bit less, then add the bit shifted out. */
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    }
    if (shift < bits) {
        int32_t res = src << shift;
        if (bits == 32) {
            /* In range iff shifting back recovers the input. */
            if (!sat || res >> shift == src) {
                return res;
            }
        } else {
            /* In range iff the result survives sign-extension from bits. */
            int32_t ext = sextract32(res, 0, bits);
            if (!sat || res == ext) {
                return ext;
            }
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    /* Saturate: max positive for src >= 0, else min, in the low bits. */
    *sat = 1;
    return (1u << (bits - 1)) - (src >= 0);
}
/*
 * Unsigned saturating rounding shift left, for 8/16/32-bit elements
 * held in a uint32_t.
 *
 * @src: value to shift
 * @shift: shift amount; negative shifts right
 * @bits: element width (8, 16 or 32)
 * @round: if true, round right shifts to nearest
 * @sat: if non-NULL, set to 1 on saturation; if NULL, overflow wraps
 *       and out-of-range results become 0
 */
static inline uint32_t do_uqrshl_bhs(uint32_t src, int32_t shift, int bits,
                                     bool round, uint32_t *sat)
{
    /* With rounding, a shift of exactly -bits can still carry in a 1. */
    if (shift <= -(bits + round)) {
        return 0;
    }
    if (shift < 0) {
        if (round) {
            /* Shift one bit less, then add the bit shifted out. */
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    }
    if (shift < bits) {
        uint32_t res = src << shift;
        if (bits == 32) {
            /* In range iff shifting back recovers the input. */
            if (!sat || res >> shift == src) {
                return res;
            }
        } else {
            /* In range iff the result fits in the low bits. */
            uint32_t ext = extract32(res, 0, bits);
            if (!sat || res == ext) {
                return ext;
            }
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    /* Saturate to all-ones in the low bits. */
    *sat = 1;
    return MAKE_64BIT_MASK(0, bits);
}
/*
 * Signed-input, unsigned-result saturating rounding shift left, for
 * 8/16/32-bit elements.  Negative inputs saturate to 0; otherwise
 * behaves as do_uqrshl_bhs.
 */
static inline int32_t do_suqrshl_bhs(int32_t src, int32_t shift, int bits,
                                     bool round, uint32_t *sat)
{
    /* A negative source has no unsigned representation: clamp to 0. */
    if (src < 0 && sat) {
        *sat = 1;
        return 0;
    }
    return do_uqrshl_bhs(src, shift, bits, round, sat);
}
/*
 * Signed saturating rounding shift left, for 64-bit elements.
 *
 * @src: value to shift
 * @shift: shift amount; negative shifts right
 * @round: if true, round right shifts to nearest
 * @sat: if non-NULL, set to 1 on saturation; if NULL, overflow wraps
 *       and out-of-range results become 0
 */
static inline int64_t do_sqrshl_d(int64_t src, int64_t shift,
                                  bool round, uint32_t *sat)
{
    if (shift <= -64) {
        /* Everything shifted out; rounding the sign bit yields 0. */
        return round ? 0 : src >> 63;
    }
    if (shift < 0) {
        if (round) {
            /* Shift one bit less, then add the bit shifted out. */
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    }
    if (shift < 64) {
        int64_t res = src << shift;
        /* In range iff shifting back recovers the input. */
        if (!sat || res >> shift == src) {
            return res;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    /* Saturate toward the sign of src. */
    *sat = 1;
    return src < 0 ? INT64_MIN : INT64_MAX;
}
/*
 * Unsigned saturating rounding shift left, for 64-bit elements.
 *
 * @src: value to shift
 * @shift: shift amount; negative shifts right
 * @round: if true, round right shifts to nearest
 * @sat: if non-NULL, set to 1 on saturation; if NULL, overflow wraps
 *       and out-of-range results become 0
 */
static inline uint64_t do_uqrshl_d(uint64_t src, int64_t shift,
                                   bool round, uint32_t *sat)
{
    /* With rounding, a shift of exactly -64 can still carry in a 1. */
    if (shift <= -(64 + round)) {
        return 0;
    }
    if (shift < 0) {
        if (round) {
            /* Shift one bit less, then add the bit shifted out. */
            src >>= -shift - 1;
            return (src >> 1) + (src & 1);
        }
        return src >> -shift;
    }
    if (shift < 64) {
        uint64_t res = src << shift;
        /* In range iff shifting back recovers the input. */
        if (!sat || res >> shift == src) {
            return res;
        }
    } else if (!sat || src == 0) {
        return 0;
    }

    /* Saturate to all-ones. */
    *sat = 1;
    return UINT64_MAX;
}
/*
 * Signed-input, unsigned-result saturating rounding shift left, for
 * 64-bit elements.  Negative inputs saturate to 0; otherwise behaves
 * as do_uqrshl_d.
 */
static inline int64_t do_suqrshl_d(int64_t src, int64_t shift,
                                   bool round, uint32_t *sat)
{
    /* A negative source has no unsigned representation: clamp to 0. */
    if (src < 0 && sat) {
        *sat = 1;
        return 0;
    }
    return do_uqrshl_d(src, shift, round, sat);
}
/*
 * Signed saturating rounding doubling multiply-accumulate helpers,
 * one per element width.  NOTE(review): the meaning of the two bool
 * flags and the uint32_t * argument is established at the out-of-line
 * definitions -- confirm there before documenting further.
 */
int8_t do_sqrdmlah_b(int8_t, int8_t, int8_t, bool, bool);
int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *);
int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *);
int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);

/*
 * 8 x 8 -> 16 vector polynomial multiply where the inputs are
 * in the low 8 bits of each 16-bit element
 */
uint64_t pmull_h(uint64_t op1, uint64_t op2);

/*
 * 16 x 16 -> 32 vector polynomial multiply where the inputs are
 * in the low 16 bits of each 32-bit element
 */
uint64_t pmull_w(uint64_t op1, uint64_t op2);

#endif /* TARGET_ARM_VEC_INTERNAL_H */