/* QEMU include/qemu/bswap.h */
#ifndef BSWAP_H
#define BSWAP_H

#include "fpu/softfloat-types.h"

#ifdef CONFIG_MACHINE_BSWAP_H
# include <sys/endian.h>
# include <machine/bswap.h>
#elif defined(__FreeBSD__)
# include <sys/endian.h>
#elif defined(__HAIKU__)
# include <endian.h>
#elif defined(CONFIG_BYTESWAP_H)
# include <byteswap.h>

static inline uint16_t bswap16(uint16_t x)
{
    return bswap_16(x);
}

static inline uint32_t bswap32(uint32_t x)
{
    return bswap_32(x);
}

static inline uint64_t bswap64(uint64_t x)
{
    return bswap_64(x);
}
# else
static inline uint16_t bswap16(uint16_t x)
{
    return (((x & 0x00ff) << 8) |
            ((x & 0xff00) >> 8));
}

static inline uint32_t bswap32(uint32_t x)
{
    return (((x & 0x000000ffU) << 24) |
            ((x & 0x0000ff00U) <<  8) |
            ((x & 0x00ff0000U) >>  8) |
            ((x & 0xff000000U) >> 24));
}

static inline uint64_t bswap64(uint64_t x)
{
    return (((x & 0x00000000000000ffULL) << 56) |
            ((x & 0x000000000000ff00ULL) << 40) |
            ((x & 0x0000000000ff0000ULL) << 24) |
            ((x & 0x00000000ff000000ULL) <<  8) |
            ((x & 0x000000ff00000000ULL) >>  8) |
            ((x & 0x0000ff0000000000ULL) >> 24) |
            ((x & 0x00ff000000000000ULL) >> 40) |
            ((x & 0xff00000000000000ULL) >> 56));
}
#endif /* ! CONFIG_MACHINE_BSWAP_H */

static inline void bswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void bswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void bswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#if defined(HOST_WORDS_BIGENDIAN)
#define be_bswap(v, size) (v)
#define le_bswap(v, size) glue(bswap, size)(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
#else
#define le_bswap(v, size) (v)
#define be_bswap(v, size) glue(bswap, size)(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
#endif

/**
 * Endianness conversion functions between host cpu and specified endianness.
 * (We list the complete set of prototypes produced by the macros below
 * to assist people who search the headers to find their definitions.)
 *
 * uint16_t le16_to_cpu(uint16_t v);
 * uint32_t le32_to_cpu(uint32_t v);
 * uint64_t le64_to_cpu(uint64_t v);
 * uint16_t be16_to_cpu(uint16_t v);
 * uint32_t be32_to_cpu(uint32_t v);
 * uint64_t be64_to_cpu(uint64_t v);
 *
 * Convert the value @v from the specified format to the native
 * endianness of the host CPU by byteswapping if necessary, and
 * return the converted value.
 *
 * uint16_t cpu_to_le16(uint16_t v);
 * uint32_t cpu_to_le32(uint32_t v);
 * uint64_t cpu_to_le64(uint64_t v);
 * uint16_t cpu_to_be16(uint16_t v);
 * uint32_t cpu_to_be32(uint32_t v);
 * uint64_t cpu_to_be64(uint64_t v);
 *
 * Convert the value @v from the native endianness of the host CPU to
 * the specified format by byteswapping if necessary, and return
 * the converted value.
 *
 * void le16_to_cpus(uint16_t *v);
 * void le32_to_cpus(uint32_t *v);
 * void le64_to_cpus(uint64_t *v);
 * void be16_to_cpus(uint16_t *v);
 * void be32_to_cpus(uint32_t *v);
 * void be64_to_cpus(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * specified format to the native endianness of the host CPU.
 *
 * void cpu_to_le16s(uint16_t *v);
 * void cpu_to_le32s(uint32_t *v);
 * void cpu_to_le64s(uint64_t *v);
 * void cpu_to_be16s(uint16_t *v);
 * void cpu_to_be32s(uint32_t *v);
 * void cpu_to_be64s(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * native endianness of the host CPU to the specified format.
 *
 * Both X_to_cpu() and cpu_to_X() perform the same operation; you
 * should use whichever one better documents the operation your
 * code is performing.
 *
 * Do not use these functions for conversion of values which are in guest
 * memory, since the data may not be sufficiently aligned for the host CPU's
 * load and store instructions. Instead you should use the ld*_p() and
 * st*_p() functions, which perform loads and stores of data of any
 * required size and endianness and handle possible misalignment.
 */

#define CPU_CONVERT(endian, size, type)\
static inline type endian ## size ## _to_cpu(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline type cpu_to_ ## endian ## size(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline void endian ## size ## _to_cpus(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}\
\
static inline void cpu_to_ ## endian ## size ## s(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}

CPU_CONVERT(be, 16, uint16_t)
CPU_CONVERT(be, 32, uint32_t)
CPU_CONVERT(be, 64, uint64_t)

CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
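
/*
 * Example (illustrative sketch, not part of the original header): how a
 * caller might use the helpers generated by CPU_CONVERT() to serialise a
 * host value into a little-endian on-disk/on-wire representation and read
 * it back.  The function name below is hypothetical.
 */
static inline uint32_t example_le32_round_trip(uint32_t host_val)
{
    uint32_t wire_val = cpu_to_le32(host_val);  /* host order -> little endian */
    return le32_to_cpu(wire_val);               /* little endian -> host order */
}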

/*
 * Same as cpu_to_le{16,32}, except that gcc will figure the result is
 * a compile-time constant if you pass in a constant. So this can be
 * used to initialize static variables.
 */
#if defined(HOST_WORDS_BIGENDIAN)
# define const_le32(_x)                       \
    ((((_x) & 0x000000ffU) << 24) |           \
     (((_x) & 0x0000ff00U) <<  8) |           \
     (((_x) & 0x00ff0000U) >>  8) |           \
     (((_x) & 0xff000000U) >> 24))
# define const_le16(_x)                       \
    ((((_x) & 0x00ff) << 8) |                 \
     (((_x) & 0xff00) >> 8))
#else
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif
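
/*
 * Example (illustrative sketch, not part of the original header): because
 * const_le32() folds to a compile-time constant, it can appear in a static
 * initializer, where cpu_to_le32() (a function call) cannot.  The variable
 * name and value are hypothetical.
 */
static const uint32_t example_magic_le =
    const_le32(0x12345678);  /* arbitrary magic, stored in little-endian order */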

/* Unions for reinterpreting between floats and integers. */

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

typedef union {
    floatx80 d;
    struct {
        uint64_t lower;
        uint16_t upper;
    } l;
} CPU_LDoubleU;

typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
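
/*
 * Example (illustrative sketch, not part of the original header): using
 * CPU_DoubleU to pick apart the two 32-bit halves of a float64 via union
 * type punning; the HOST_WORDS_BIGENDIAN-conditioned struct layout makes
 * .l.upper the most significant word on either kind of host.  The function
 * name is hypothetical.
 */
static inline uint32_t example_float64_high_word(float64 d)
{
    CPU_DoubleU u;

    u.d = d;           /* reinterpret the float64 bit pattern... */
    return u.l.upper;  /* ...and return its most significant 32 bits */
}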

/* unaligned/endian-independent pointer access */

/*
 * the generic syntax is:
 *
 * load: ld{type}{sign}{size}_{endian}_p(ptr)
 *
 * store: st{type}{size}_{endian}_p(ptr, val)
 *
 * Note there are small differences with the softmmu access API!
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for 32 or 64 bit sizes (including floats and doubles)
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 *   he   : host endian
 *   be   : big endian
 *   le   : little endian
 *   te   : target endian
 * (except for byte accesses, which have no endian infix).
 *
 * The target endian accessors are obviously only available to source
 * files which are built per-target; they are defined in cpu-all.h.
 *
 * In all cases these functions take a host pointer.
 * For accessors that take a guest address rather than a
 * host address, see the cpu_{ld,st}_* accessors defined in
 * cpu_ldst.h.
 *
 * For cases where the size to be used is not fixed at compile time,
 * there are
 *
 * stn_{endian}_p(ptr, sz, val)
 * which stores @val to @ptr as an @endian-order number @sz bytes in size
 * and
 * ldn_{endian}_p(ptr, sz)
 * which loads @sz bytes from @ptr as an unsigned @endian-order number
 * and returns it in a uint64_t.
 */

static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, uint8_t v)
{
    *(uint8_t *)ptr = v;
}

/*
 * Any compiler worth its salt will turn these memcpy into native unaligned
 * operations.  Thus we don't need to play games with packed attributes, or
 * inline byte-by-byte stores.
 * Some compilation environments (eg some fortify-source implementations)
 * may intercept memcpy() in a way that defeats the compiler optimization,
 * though, so we use __builtin_memcpy() to give ourselves the best chance
 * of good performance.
 */

static inline int lduw_he_p(const void *ptr)
{
    uint16_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline int ldsw_he_p(const void *ptr)
{
    int16_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stw_he_p(void *ptr, uint16_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}

static inline int ldl_he_p(const void *ptr)
{
    int32_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stl_he_p(void *ptr, uint32_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}

static inline uint64_t ldq_he_p(const void *ptr)
{
    uint64_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stq_he_p(void *ptr, uint64_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}
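
/*
 * Example (illustrative sketch, not part of the original header): because the
 * accessors above go through memcpy, they are safe even when the pointer is
 * not naturally aligned, e.g. when pulling a field out of a packed byte
 * buffer.  The function name and the offset used are hypothetical.
 */
static inline uint32_t example_read_unaligned_u32(const uint8_t *buf)
{
    /* buf + 1 is generally not 4-byte aligned; ldl_he_p() handles that. */
    return ldl_he_p(buf + 1);
}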

static inline int lduw_le_p(const void *ptr)
{
    return (uint16_t)le_bswap(lduw_he_p(ptr), 16);
}

static inline int ldsw_le_p(const void *ptr)
{
    return (int16_t)le_bswap(lduw_he_p(ptr), 16);
}

static inline int ldl_le_p(const void *ptr)
{
    return le_bswap(ldl_he_p(ptr), 32);
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return le_bswap(ldq_he_p(ptr), 64);
}

static inline void stw_le_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, le_bswap(v, 16));
}

static inline void stl_le_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, le_bswap(v, 32));
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, le_bswap(v, 64));
}

static inline int lduw_be_p(const void *ptr)
{
    return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
}

static inline int ldsw_be_p(const void *ptr)
{
    return (int16_t)be_bswap(lduw_he_p(ptr), 16);
}

static inline int ldl_be_p(const void *ptr)
{
    return be_bswap(ldl_he_p(ptr), 32);
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return be_bswap(ldq_he_p(ptr), 64);
}

static inline void stw_be_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, be_bswap(v, 16));
}

static inline void stl_be_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, be_bswap(v, 32));
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, be_bswap(v, 64));
}
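
/*
 * Example (illustrative sketch, not part of the original header): the
 * fixed-endianness accessors are the natural way to read an on-wire or
 * on-disk structure laid out in a known byte order, independent of the
 * host.  The buffer layout and function name below are hypothetical.
 */
static inline uint16_t example_parse_be16_field(const uint8_t *packet)
{
    /* in this sketch, bytes 2..3 hold a big-endian 16-bit length field */
    return lduw_be_p(packet + 2);
}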

static inline unsigned long leul_to_cpu(unsigned long v)
{
#if HOST_LONG_BITS == 32
    return le_bswap(v, 32);
#elif HOST_LONG_BITS == 64
    return le_bswap(v, 64);
#else
# error Unknown sizeof long
#endif
}

/* Store v to p as a sz byte value in host order */
#define DO_STN_LDN_P(END)                                               \
static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v)      \
{                                                                       \
    switch (sz) {                                                       \
    case 1:                                                             \
        stb_p(ptr, v);                                                  \
        break;                                                          \
    case 2:                                                             \
        stw_ ## END ## _p(ptr, v);                                      \
        break;                                                          \
    case 4:                                                             \
        stl_ ## END ## _p(ptr, v);                                      \
        break;                                                          \
    case 8:                                                             \
        stq_ ## END ## _p(ptr, v);                                      \
        break;                                                          \
    default:                                                            \
        g_assert_not_reached();                                         \
    }                                                                   \
}                                                                       \
static inline uint64_t ldn_## END ## _p(const void *ptr, int sz)        \
{                                                                       \
    switch (sz) {                                                       \
    case 1:                                                             \
        return ldub_p(ptr);                                             \
    case 2:                                                             \
        return lduw_ ## END ## _p(ptr);                                 \
    case 4:                                                             \
        return (uint32_t)ldl_ ## END ## _p(ptr);                        \
    case 8:                                                             \
        return ldq_ ## END ## _p(ptr);                                  \
    default:                                                            \
        g_assert_not_reached();                                         \
    }                                                                   \
}

DO_STN_LDN_P(he)
DO_STN_LDN_P(le)
DO_STN_LDN_P(be)

#undef DO_STN_LDN_P
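
/*
 * Example (illustrative sketch, not part of the original header): stn_le_p()
 * and ldn_le_p() are useful when the access width is only known at run time.
 * This sketch stores @val as a little-endian field of @size bytes and then
 * reloads it; the function name and parameters are hypothetical, and the
 * reload truncates @val if @size is less than 8.
 */
static inline uint64_t example_ldn_le_round_trip(void *buf, int size,
                                                 uint64_t val)
{
    stn_le_p(buf, size, val);    /* store val as a @size-byte LE field */
    return ldn_le_p(buf, size);  /* read it back as an unsigned value  */
}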

#undef le_bswap
#undef be_bswap
#undef le_bswaps
#undef be_bswaps

#endif /* BSWAP_H */