#ifndef BSWAP_H
#define BSWAP_H

#include "config-host.h"

#include <inttypes.h>

#ifdef CONFIG_MACHINE_BSWAP_H
#include <sys/endian.h>
#include <sys/types.h>
#include <machine/bswap.h>
#else

#include "softfloat.h"

#ifdef CONFIG_BYTESWAP_H
#include <byteswap.h>
#else
#define bswap_16(x) \
({ \
    uint16_t __x = (x); \
    ((uint16_t)( \
        (((uint16_t)(__x) & (uint16_t)0x00ffU) << 8) | \
        (((uint16_t)(__x) & (uint16_t)0xff00U) >> 8) )); \
})

#define bswap_32(x) \
({ \
    uint32_t __x = (x); \
    ((uint32_t)( \
        (((uint32_t)(__x) & (uint32_t)0x000000ffUL) << 24) | \
        (((uint32_t)(__x) & (uint32_t)0x0000ff00UL) << 8) | \
        (((uint32_t)(__x) & (uint32_t)0x00ff0000UL) >> 8) | \
        (((uint32_t)(__x) & (uint32_t)0xff000000UL) >> 24) )); \
})

#define bswap_64(x) \
({ \
    uint64_t __x = (x); \
    ((uint64_t)( \
        (uint64_t)(((uint64_t)(__x) & (uint64_t)0x00000000000000ffULL) << 56) | \
        (uint64_t)(((uint64_t)(__x) & (uint64_t)0x000000000000ff00ULL) << 40) | \
        (uint64_t)(((uint64_t)(__x) & (uint64_t)0x0000000000ff0000ULL) << 24) | \
        (uint64_t)(((uint64_t)(__x) & (uint64_t)0x00000000ff000000ULL) << 8) | \
        (uint64_t)(((uint64_t)(__x) & (uint64_t)0x000000ff00000000ULL) >> 8) | \
        (uint64_t)(((uint64_t)(__x) & (uint64_t)0x0000ff0000000000ULL) >> 24) | \
        (uint64_t)(((uint64_t)(__x) & (uint64_t)0x00ff000000000000ULL) >> 40) | \
        (uint64_t)(((uint64_t)(__x) & (uint64_t)0xff00000000000000ULL) >> 56) )); \
})

#endif /* !CONFIG_BYTESWAP_H */
static inline uint16_t bswap16(uint16_t x)
{
    return bswap_16(x);
}

static inline uint32_t bswap32(uint32_t x)
{
    return bswap_32(x);
}

static inline uint64_t bswap64(uint64_t x)
{
    return bswap_64(x);
}

#endif /* ! CONFIG_MACHINE_BSWAP_H */
static inline void bswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void bswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void bswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}
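
/*
 * Usage sketch (illustrative only, not part of the original header):
 *
 *     uint32_t x = 0x11223344;
 *     uint32_t y = bswap32(x);    // y == 0x44332211
 *     bswap32s(&x);               // swaps x in place
 */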
#if defined(HOST_WORDS_BIGENDIAN)
#define be_bswap(v, size) (v)
#define le_bswap(v, size) bswap ## size(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) *p = bswap ## size(*p);
#else
#define le_bswap(v, size) (v)
#define be_bswap(v, size) bswap ## size(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) *p = bswap ## size(*p);
#endif
#define CPU_CONVERT(endian, size, type)\
static inline type endian ## size ## _to_cpu(type v)\
{\
    return endian ## _bswap(v, size);\
}\
\
static inline type cpu_to_ ## endian ## size(type v)\
{\
    return endian ## _bswap(v, size);\
}\
\
static inline void endian ## size ## _to_cpus(type *p)\
{\
    endian ## _bswaps(p, size)\
}\
\
static inline void cpu_to_ ## endian ## size ## s(type *p)\
{\
    endian ## _bswaps(p, size)\
}\
\
static inline type endian ## size ## _to_cpup(const type *p)\
{\
    return endian ## size ## _to_cpu(*p);\
}\
\
static inline void cpu_to_ ## endian ## size ## w(type *p, type v)\
{\
    *p = cpu_to_ ## endian ## size(v);\
}
CPU_CONVERT(be, 16, uint16_t)
CPU_CONVERT(be, 32, uint32_t)
CPU_CONVERT(be, 64, uint64_t)

CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
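
/*
 * Each CPU_CONVERT() instantiation above expands into six helpers; for
 * (le, 32, uint32_t) these are le32_to_cpu(), cpu_to_le32(), le32_to_cpus(),
 * cpu_to_le32s(), le32_to_cpup() and cpu_to_le32w().  A minimal usage sketch
 * (illustrative values, not part of the original header):
 *
 *     uint32_t wire = cpu_to_le32(0x12345678);   // value in LE byte order
 *     uint32_t host = le32_to_cpu(wire);         // back to host byte order
 */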
/* unaligned versions (optimized for frequent unaligned accesses) */

#if defined(__i386__) || defined(_ARCH_PPC)

#define cpu_to_le16wu(p, v) cpu_to_le16w(p, v)
#define cpu_to_le32wu(p, v) cpu_to_le32w(p, v)
#define le16_to_cpupu(p) le16_to_cpup(p)
#define le32_to_cpupu(p) le32_to_cpup(p)
#define be32_to_cpupu(p) be32_to_cpup(p)

#define cpu_to_be16wu(p, v) cpu_to_be16w(p, v)
#define cpu_to_be32wu(p, v) cpu_to_be32w(p, v)
#define cpu_to_be64wu(p, v) cpu_to_be64w(p, v)

#else
static inline void cpu_to_le16wu(uint16_t *p, uint16_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v & 0xff;
    p1[1] = v >> 8;
}

static inline void cpu_to_le32wu(uint32_t *p, uint32_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v & 0xff;
    p1[1] = v >> 8;
    p1[2] = v >> 16;
    p1[3] = v >> 24;
}

static inline uint16_t le16_to_cpupu(const uint16_t *p)
{
    const uint8_t *p1 = (const uint8_t *)p;
    return p1[0] | (p1[1] << 8);
}

static inline uint32_t le32_to_cpupu(const uint32_t *p)
{
    const uint8_t *p1 = (const uint8_t *)p;
    return p1[0] | (p1[1] << 8) | (p1[2] << 16) | (p1[3] << 24);
}

static inline uint32_t be32_to_cpupu(const uint32_t *p)
{
    const uint8_t *p1 = (const uint8_t *)p;
    return p1[3] | (p1[2] << 8) | (p1[1] << 16) | (p1[0] << 24);
}

static inline void cpu_to_be16wu(uint16_t *p, uint16_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v >> 8;
    p1[1] = v & 0xff;
}

static inline void cpu_to_be32wu(uint32_t *p, uint32_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v >> 24;
    p1[1] = v >> 16;
    p1[2] = v >> 8;
    p1[3] = v & 0xff;
}

static inline void cpu_to_be64wu(uint64_t *p, uint64_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v >> 56;
    p1[1] = v >> 48;
    p1[2] = v >> 40;
    p1[3] = v >> 32;
    p1[4] = v >> 24;
    p1[5] = v >> 16;
    p1[6] = v >> 8;
    p1[7] = v & 0xff;
}

#endif
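
/*
 * The *wu/*pu helpers above store to and load from pointers that may be
 * misaligned, one byte at a time.  A minimal sketch of their use
 * (illustrative only, not from the original file):
 *
 *     uint8_t buf[8];
 *     cpu_to_le32wu((uint32_t *)(buf + 1), 0xaabbccdd);   // unaligned LE store
 *     uint32_t v = le32_to_cpupu((uint32_t *)(buf + 1));  // unaligned LE load
 */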
#ifdef HOST_WORDS_BIGENDIAN
#define cpu_to_32wu cpu_to_be32wu
#define leul_to_cpu(v) glue(glue(le,HOST_LONG_BITS),_to_cpu)(v)
#else
#define cpu_to_32wu cpu_to_le32wu
#define leul_to_cpu(v) (v)
#endif

#undef le_bswap
#undef be_bswap
#undef le_bswaps
#undef be_bswaps
/* len must be one of 1, 2, 4 */
static inline uint32_t qemu_bswap_len(uint32_t value, int len)
{
    return bswap32(value) >> (32 - 8 * len);
}
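
/*
 * For example (illustrative values, not part of the original header),
 * qemu_bswap_len(0x00003344, 2) byte-swaps only the low 16 bits:
 * bswap32(0x00003344) == 0x44330000, shifted right by 16 gives 0x4433.
 */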
typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

typedef union {
    floatx80 d;
    struct {
        uint64_t lower;
        uint16_t upper;
    } l;
} CPU_LDoubleU;

typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
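
/*
 * These unions let code pick a float apart without pointer casts; the field
 * order is chosen per host endianness so that "upper" is always the most
 * significant half.  A minimal sketch (illustrative only, not part of the
 * original header):
 *
 *     CPU_DoubleU u;
 *     u.d = some_float64;          // some_float64 is a hypothetical value
 *     uint32_t high = u.l.upper;   // most significant 32 bits on any host
 *     uint32_t low  = u.l.lower;   // least significant 32 bits
 */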
/* unaligned/endian-independent pointer access */

/*
 * the generic syntax is:
 *
 * load: ld{type}{sign}{size}{endian}_p(ptr)
 *
 * store: st{type}{size}{endian}_p(ptr, val)
 *
 * Note there are small differences with the softmmu access API!
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): 8 bit access
 *   be   : big endian
 *   le   : little endian
 */
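
/*
 * Putting the naming scheme together (illustrative only, not from the
 * original header): ldl_le_p() is a 32-bit little-endian load and
 * stw_be_p() a 16-bit big-endian store, both working on any byte pointer:
 *
 *     uint8_t buf[8] = { 0 };
 *     stw_be_p(buf, 0x1234);        // buf[0] == 0x12, buf[1] == 0x34
 *     int v = ldl_le_p(buf + 2);    // 32-bit LE load from an odd offset
 */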
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}
static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}
static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}
/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}
#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif
#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}
static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}
static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}
/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}
#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif
#endif /* BSWAP_H */