/*
 * Let's make sure we always have a sane definition for ntohl()/htonl().
 * Some libraries define those as a function call, just to perform byte
 * shifting, bringing significant overhead to what should be a simple
 * operation.
 */

/*
 * Default version that the compiler ought to optimize properly with
 * constant values.
 */
static inline uint32_t default_swab32(uint32_t val)
{
	return (((val & 0xff000000) >> 24) |
		((val & 0x00ff0000) >>  8) |
		((val & 0x0000ff00) <<  8) |
		((val & 0x000000ff) << 24));
}
static inline uint64_t default_bswap64(uint64_t val)
{
	return (((val & (uint64_t)0x00000000000000ffULL) << 56) |
		((val & (uint64_t)0x000000000000ff00ULL) << 40) |
		((val & (uint64_t)0x0000000000ff0000ULL) << 24) |
		((val & (uint64_t)0x00000000ff000000ULL) <<  8) |
		((val & (uint64_t)0x000000ff00000000ULL) >>  8) |
		((val & (uint64_t)0x0000ff0000000000ULL) >> 24) |
		((val & (uint64_t)0x00ff000000000000ULL) >> 40) |
		((val & (uint64_t)0xff00000000000000ULL) >> 56));
}
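/*
 * Illustration: byte swapping is its own inverse, so a round trip through
 * either helper restores the original value, e.g.
 *
 *	default_swab32(0x12345678) == 0x78563412
 *	default_swab32(default_swab32(0x12345678)) == 0x12345678
 *	default_bswap64(0x0102030405060708ULL) == 0x0807060504030201ULL
 */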
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define bswap32 git_bswap32
static inline uint32_t git_bswap32(uint32_t x)
{
	uint32_t result;

	/* use the portable helper for constants so the compiler can fold them */
	if (__builtin_constant_p(x))
		result = default_swab32(x);
	else
		__asm__("bswap %0" : "=r" (result) : "0" (x));
	return result;
}
#define bswap64 git_bswap64
#if defined(__x86_64__)
static inline uint64_t git_bswap64(uint64_t x)
{
	uint64_t result;

	if (__builtin_constant_p(x))
		result = default_bswap64(x);
	else
		__asm__("bswap %q0" : "=r" (result) : "0" (x));
	return result;
}
#else
static inline uint64_t git_bswap64(uint64_t x)
{
	union { uint64_t i64; uint32_t i32[2]; } tmp, result;

	if (__builtin_constant_p(x))
		result.i64 = default_bswap64(x);
	else {
		/* on 32-bit x86, byte-swap each half and exchange the halves */
		tmp.i64 = x;
		result.i32[0] = git_bswap32(tmp.i32[1]);
		result.i32[1] = git_bswap32(tmp.i32[0]);
	}
	return result.i64;
}
#endif
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))

#include <stdlib.h>

#define bswap32(x) _byteswap_ulong(x)
#define bswap64(x) _byteswap_uint64(x)

#endif
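/*
 * Example: on these platforms bswap32()/bswap64() swap a value in a single
 * instruction (the GCC/Clang inline asm above, or the MSVC
 * _byteswap_ulong()/_byteswap_uint64() intrinsics), e.g.
 *
 *	uint32_t net = bswap32(0x12345678);	(net == 0x78563412)
 *
 * On other platforms bswap32/bswap64 stay undefined and the fallbacks
 * below use default_bswap64() instead.
 */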
#if defined(bswap32)
#undef ntohl
#undef htonl
#define ntohl(x) bswap32(x)
#define htonl(x) bswap32(x)
#endif

#if defined(bswap64)
#undef ntohll
#undef htonll
#define ntohll(x) bswap64(x)
#define htonll(x) bswap64(x)
#else
#if !defined(__BYTE_ORDER)
# if defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
#  define __BYTE_ORDER BYTE_ORDER
#  define __LITTLE_ENDIAN LITTLE_ENDIAN
#  define __BIG_ENDIAN BIG_ENDIAN
# endif
#endif
#if !defined(__BYTE_ORDER)
# error "Cannot determine endianness"
#endif
#if __BYTE_ORDER == __BIG_ENDIAN
# define ntohll(n) (n)
# define htonll(n) (n)
#else
# define ntohll(n) default_bswap64(n)
# define htonll(n) default_bswap64(n)
#endif

#endif
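/*
 * Example: ntohll()/htonll() mirror ntohl()/htonl() for 64-bit values,
 * converting between network (big-endian) and host byte order, e.g.
 *
 *	uint64_t on_wire = htonll(size);	(host order to big-endian)
 *	uint64_t back    = ntohll(on_wire);	(back == size again)
 *
 * "size" here is only a placeholder for some host-order uint64_t.
 */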
/*
 * Performance might be improved if the CPU architecture is OK with
 * unaligned 32-bit loads and a fast ntohl() is available.
 * Otherwise fall back to byte loads and shifts, which are portable
 * and faster on architectures with memory alignment issues.
 */
#if defined(__i386__) || defined(__x86_64__) || \
    defined(_M_IX86) || defined(_M_X64) || \
    defined(__ppc__) || defined(__ppc64__) || \
    defined(__powerpc__) || defined(__powerpc64__) || \
    defined(__s390__) || defined(__s390x__)
#define get_be16(p)	ntohs(*(unsigned short *)(p))
#define get_be32(p)	ntohl(*(unsigned int *)(p))
#define put_be32(p, v)	do { *(unsigned int *)(p) = htonl(v); } while (0)
#else

#define get_be16(p)	( \
	(*((unsigned char *)(p) + 0) << 8) | \
	(*((unsigned char *)(p) + 1) << 0) )
#define get_be32(p)	( \
	(*((unsigned char *)(p) + 0) << 24) | \
	(*((unsigned char *)(p) + 1) << 16) | \
	(*((unsigned char *)(p) + 2) <<  8) | \
	(*((unsigned char *)(p) + 3) <<  0) )
#define put_be32(p, v)	do { \
	unsigned int __v = (v); \
	*((unsigned char *)(p) + 0) = __v >> 24; \
	*((unsigned char *)(p) + 1) = __v >> 16; \
	*((unsigned char *)(p) + 2) = __v >>  8; \
	*((unsigned char *)(p) + 3) = __v >>  0; } while (0)

#endif
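/*
 * Example: whichever branch above was taken, get_be32()/put_be32() read
 * and write a big-endian 32-bit value at a possibly unaligned address:
 *
 *	unsigned char buf[4];
 *	put_be32(buf, 0x12345678);	(buf now holds 12 34 56 78)
 *	unsigned int v = get_be32(buf);	(v == 0x12345678 on any host)
 */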