/*
   Unix SMB/CIFS implementation.
   SMB Byte handling
   Copyright (C) Andrew Tridgell 1992-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef _BYTEORDER_H
#define _BYTEORDER_H
/*
   This file implements macros for machine independent short and
   int manipulation.

Here is a description of this file that I emailed to the samba list once:

> I am confused about the way that byteorder.h works in Samba. I have
> looked at it, and I would have thought that you might make a distinction
> between LE and BE machines, but you only seem to distinguish between 386
> and all other architectures.
>
> Can you give me a clue?

Ok, now to the macros themselves. I'll take a simple example, say we
want to extract a 2 byte integer from a SMB packet and put it into a
type called uint16_t that is in the local machine's byte order, and you
want to do it with only the assumption that uint16_t is _at_least_ 16
bits long (this last condition is very important for architectures
that don't have any int types that are 2 bytes long)

You do this:

#define CVAL(buf,pos) (((uint8_t *)(buf))[pos])
#define PVAL(buf,pos) ((unsigned int)CVAL(buf,pos))
#define SVAL(buf,pos) (PVAL(buf,pos)|PVAL(buf,(pos)+1)<<8)

then to extract a uint16_t value at offset 25 in a buffer you do this:

char *buffer = foo_bar();
uint16_t xx = SVAL(buffer,25);

We are using the byteorder independence of the ANSI C bitshifts to do
the work. A good optimising compiler should turn this into efficient
code, especially if it happens to have the right byteorder :-)

I know these macros can be made a bit tidier by removing some of the
casts, but you need to look at byteorder.h as a whole to see the
reasoning behind them. byteorder.h defines the following macros:

SVAL(buf,pos) - extract a 2 byte SMB value
IVAL(buf,pos) - extract a 4 byte SMB value
BVAL(buf,pos) - extract a 8 byte SMB value
SVALS(buf,pos) - signed version of SVAL()
IVALS(buf,pos) - signed version of IVAL()
BVALS(buf,pos) - signed version of BVAL()

SSVAL(buf,pos,val) - put a 2 byte SMB value into a buffer
SIVAL(buf,pos,val) - put a 4 byte SMB value into a buffer
SBVAL(buf,pos,val) - put a 8 byte SMB value into a buffer
SSVALS(buf,pos,val) - signed version of SSVAL()
SIVALS(buf,pos,val) - signed version of SIVAL()
SBVALS(buf,pos,val) - signed version of SBVAL()

RSVAL(buf,pos) - like SVAL() but for NMB byte ordering
RSVALS(buf,pos) - like SVALS() but for NMB byte ordering
RIVAL(buf,pos) - like IVAL() but for NMB byte ordering
RIVALS(buf,pos) - like IVALS() but for NMB byte ordering
RSSVAL(buf,pos,val) - like SSVAL() but for NMB ordering
RSIVAL(buf,pos,val) - like SIVAL() but for NMB ordering
RSIVALS(buf,pos,val) - like SIVALS() but for NMB ordering

it also defines lots of intermediate macros, just ignore those :-)
*/
/*
 * On powerpc we can use the magic instructions to load/store in little endian.
 * The instructions are reverse-indexing, so assume a big endian Power
 * processor. Power8 can be big or little endian, so we need to explicitly
 * check.  (NOTE: last sentence truncated in source — confirm against upstream.)
 */
#if (defined(__powerpc__) && defined(__GNUC__) && HAVE_BIG_ENDIAN)

/* Load a 16-bit little-endian value using a byte-reversed halfword load. */
static __inline__ uint16_t ld_le16(const uint16_t *addr)
{
	uint16_t val;
	/* "m" (*addr) tells the compiler the memory is read, so the load
	 * is not hoisted past a prior store to *addr. */
	__asm__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
	return val;
}

/* Store a 16-bit value in little-endian order with a byte-reversed store. */
static __inline__ void st_le16(uint16_t *addr, const uint16_t val)
{
	__asm__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}

/* Load a 32-bit little-endian value using a byte-reversed word load. */
static __inline__ uint32_t ld_le32(const uint32_t *addr)
{
	uint32_t val;
	__asm__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
	return val;
}

/* Store a 32-bit value in little-endian order with a byte-reversed store. */
static __inline__ void st_le32(uint32_t *addr, const uint32_t val)
{
	__asm__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}

#define HAVE_ASM_BYTEORDER 1

#else /* not big-endian powerpc gcc */

#define HAVE_ASM_BYTEORDER 0

#endif
/* Single-byte access: CVAL reads a byte (widened to unsigned int),
 * CVAL_NC yields a writable byte lvalue, SCVAL stores a byte. */
#define CVAL(buf,pos) ((unsigned int)(((const uint8_t *)(buf))[pos]))
#define CVAL_NC(buf,pos) (((uint8_t *)(buf))[pos]) /* Non-const version of CVAL */
#define PVAL(buf,pos) (CVAL(buf,pos))
#define SCVAL(buf,pos,val) (CVAL_NC(buf,pos) = (val))
#if HAVE_ASM_BYTEORDER

/* Use the powerpc byte-reversing load/store helpers defined above. */
#define _PTRPOS(buf,pos) (((const uint8_t *)(buf))+(pos))
#define SVAL(buf,pos) ld_le16((const uint16_t *)_PTRPOS(buf,pos))
#define IVAL(buf,pos) ld_le32((const uint32_t *)_PTRPOS(buf,pos))
#define SSVAL(buf,pos,val) st_le16((uint16_t *)_PTRPOS(buf,pos), val)
#define SIVAL(buf,pos,val) st_le32((uint32_t *)_PTRPOS(buf,pos), val)
#define SVALS(buf,pos) ((int16_t)SVAL(buf,pos))
#define IVALS(buf,pos) ((int32_t)IVAL(buf,pos))
#define SSVALS(buf,pos,val) SSVAL((buf),(pos),((int16_t)(val)))
#define SIVALS(buf,pos,val) SIVAL((buf),(pos),((int32_t)(val)))

#else /* not HAVE_ASM_BYTEORDER */

/* Portable little-endian access built from byte loads/stores and shifts;
 * works regardless of host byte order and alignment. */
#define SVAL(buf,pos) (PVAL(buf,pos)|PVAL(buf,(pos)+1)<<8)
#define IVAL(buf,pos) (SVAL(buf,pos)|SVAL(buf,(pos)+2)<<16)
#define SSVALX(buf,pos,val) (CVAL_NC(buf,pos)=(uint8_t)((val)&0xFF),CVAL_NC(buf,pos+1)=(uint8_t)((val)>>8))
#define SIVALX(buf,pos,val) (SSVALX(buf,pos,val&0xFFFF),SSVALX(buf,pos+2,val>>16))
#define SVALS(buf,pos) ((int16_t)SVAL(buf,pos))
#define IVALS(buf,pos) ((int32_t)IVAL(buf,pos))
#define SSVAL(buf,pos,val) SSVALX((buf),(pos),((uint16_t)(val)))
#define SIVAL(buf,pos,val) SIVALX((buf),(pos),((uint32_t)(val)))
#define SSVALS(buf,pos,val) SSVALX((buf),(pos),((int16_t)(val)))
#define SIVALS(buf,pos,val) SIVALX((buf),(pos),((int32_t)(val)))

#endif /* not HAVE_ASM_BYTEORDER */
/* 64-bit little-endian access, composed from two 32-bit accesses. */
#define BVAL(p, ofs) (IVAL(p,ofs) | (((uint64_t)IVAL(p,(ofs)+4)) << 32))
#define BVALS(p, ofs) ((int64_t)BVAL(p,ofs))
#define SBVAL(p, ofs, v) (SIVAL(p,ofs,(v)&0xFFFFFFFF), SIVAL(p,(ofs)+4,((uint64_t)(v))>>32))
#define SBVALS(p, ofs, v) (SBVAL(p,ofs,(uint64_t)v))
/* now the reverse routines - these are used in nmb packets (mostly) */
/* SREV/IREV/BREV byte-swap a 16/32/64-bit value; the R* macros read and
 * write big-endian (network order) fields on top of the LE accessors. */
#define SREV(x) ((((x)&0xFF)<<8) | (((x)>>8)&0xFF))
#define IREV(x) ((SREV(x)<<16) | (SREV((x)>>16)))
#define BREV(x) ((IREV((uint64_t)x)<<32) | (IREV(((uint64_t)x)>>32)))

#define RSVAL(buf,pos) SREV(SVAL(buf,pos))
#define RSVALS(buf,pos) SREV(SVALS(buf,pos))
#define RIVAL(buf,pos) IREV(IVAL(buf,pos))
#define RIVALS(buf,pos) IREV(IVALS(buf,pos))
#define RBVAL(buf,pos) BREV(BVAL(buf,pos))
#define RBVALS(buf,pos) BREV(BVALS(buf,pos))
#define RSSVAL(buf,pos,val) SSVAL(buf,pos,SREV(val))
#define RSSVALS(buf,pos,val) SSVALS(buf,pos,SREV(val))
#define RSIVAL(buf,pos,val) SIVAL(buf,pos,IREV(val))
#define RSIVALS(buf,pos,val) SIVALS(buf,pos,IREV(val))
#define RSBVAL(buf,pos,val) SBVAL(buf,pos,BREV(val))
#define RSBVALS(buf,pos,val) SBVALS(buf,pos,BREV(val))
/* Alignment macros: round p up to the next 4- or 2-byte boundary,
 * measured relative to base (PTR_DIFF is defined elsewhere). */
#define ALIGN4(p,base) ((p) + ((4 - (PTR_DIFF((p), (base)) & 3)) & 3))
#define ALIGN2(p,base) ((p) + ((2 - (PTR_DIFF((p), (base)) & 1)) & 1))
/* macros for accessing SMB protocol elements */
/* Byte offset of word-vector entry 'vwv' (SMB VWV fields are 2 bytes each). */
#define VWV(vwv) ((vwv)*2)
190 #endif /* _BYTEORDER_H */