kernel: Replace the remaining __amd64__ with __x86_64__ for consistency.
[dragonfly.git] / contrib / tcpdump / extract.h
/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#) $Header: /tcpdump/master/tcpdump/extract.h,v 1.25 2006-01-30 16:20:07 hannes Exp $ (LBL)
 */

/*
 * Macros to extract possibly-unaligned big-endian integral values.
 */
#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#ifdef HAVE___ATTRIBUTE__
/*
 * We have __attribute__; we assume that means we have __attribute__((packed)).
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate, for this instruction set,
 * better code to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()" than the code to fetch the bytes one at a time and
 * assemble them.  (That might not be the case on a little-endian platform,
 * where "ntohs()" and "ntohl()" might not be done inline.)
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

#define EXTRACT_16BITS(p) \
	((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val))
#define EXTRACT_32BITS(p) \
	((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val))
#define EXTRACT_64BITS(p) \
	((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 | \
		     ((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0))
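
/*
 * Illustrative sketch (not part of the original header): the packed
 * structs tell the compiler that "p" may be misaligned, so on a
 * strict-alignment CPU it emits an alignment-safe load, e.g.:
 *
 *	const u_int8_t *bp = pkt + 1;		(odd, unaligned address)
 *	u_int16_t v = EXTRACT_16BITS(bp);	(safe)
 *
 * A bare "*(const u_int16_t *)bp" at the same address could instead
 * fault (e.g. SIGBUS on classic SPARC) or fetch the wrong bytes on
 * CPUs that silently mask low address bits.
 */
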
#else /* HAVE___ATTRIBUTE__ */
/*
 * We don't have __attribute__, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 3)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 7)))
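
/*
 * Worked example (illustrative, assuming two wire bytes 0x12 0x34):
 * the fallback EXTRACT_16BITS() computes
 *
 *	(u_int16_t)(0x12 << 8 | 0x34) == 0x1234
 *
 * one byte at a time, so it never issues a load wider than 8 bits and
 * is correct regardless of host byte order or pointer alignment.
 */
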
#endif /* HAVE___ATTRIBUTE__ */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)ntohs(*(const u_int16_t *)(p)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)ntohl(*(const u_int32_t *)(p)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)(((u_int64_t)ntohl(*((const u_int32_t *)(p) + 0))) << 32 | \
		     ((u_int64_t)ntohl(*((const u_int32_t *)(p) + 1))) << 0))
#endif /* LBL_ALIGN */
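
/*
 * Usage sketch (hypothetical caller, not from this header): whichever
 * branch above was compiled in, dissectors call the macros the same
 * way, e.g. to pull the big-endian ports out of a UDP header at "up":
 *
 *	u_int16_t sport = EXTRACT_16BITS(up);
 *	u_int16_t dport = EXTRACT_16BITS(up + 2);
 */
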
#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2)))
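
/*
 * Example (illustrative): 24-bit big-endian fields turn up in, e.g.,
 * the 3-byte OUI of a SNAP header; for the bytes 0x00 0x00 0x0C this
 * macro yields 0x00000C (the Cisco OUI).
 */
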
/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 7) << 56 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 6) << 48 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 5) << 40 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 4) << 32 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 3) << 24 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 2) << 16 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 1) << 8 | \
		     (u_int64_t)*((const u_int8_t *)(p) + 0)))
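
/*
 * Worked example (illustrative, assuming wire bytes 0x78 0x56 0x34 0x12):
 * EXTRACT_LE_32BITS() assembles them least-significant byte first,
 *
 *	0x12 << 24 | 0x34 << 16 | 0x56 << 8 | 0x78 == 0x12345678
 *
 * matching protocols that put integers on the wire little-endian
 * (e.g. SMB), independent of the host's own byte order.
 */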