#ifndef _PPC_BYTEORDER_H
#define _PPC_BYTEORDER_H

/*
 * $Id: byteorder.h,v 1.14 1998/08/12 05:07:12 paulus Exp $
 */

#include <asm/types.h>

#ifdef __KERNEL__

/* Load a 16-bit little-endian value via a byte-reversed halfword load. */
extern __inline__ unsigned ld_le16(const volatile unsigned short *addr)
{
	unsigned val;

	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
	return val;
}

/* Store a 16-bit value in little-endian order via a byte-reversed halfword store. */
extern __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
{
	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}

/* Load a 32-bit little-endian value via a byte-reversed word load. */
extern __inline__ unsigned ld_le32(const volatile unsigned *addr)
{
	unsigned val;

	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
	return val;
}

/* Store a 32-bit value in little-endian order via a byte-reversed word store. */
extern __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
{
	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}

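/*
 * Usage sketch (illustrative only; the `regs' pointer below is an assumption,
 * not part of this header): on big-endian PowerPC these helpers access a
 * little-endian device register in one byte-reversed load or store, with no
 * explicit shifting or masking.
 *
 *	unsigned status = ld_le32(regs);	value arrives in CPU byte order
 *	st_le32(regs, status | 1);		stored back in little-endian order
 */
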
/* alas, egcs sounds like it has a bug in this code that doesn't use the
   inline asm correctly, and can cause file corruption. Until I hear that
   it's fixed, I can live without the extra speed. I hope. */
/* Swap the two bytes of a 16-bit value with a single rotate-and-insert. */
static __inline__ __const__ __u16 ___arch__swab16(__u16 value)
{
	__u16 result;

	__asm__("rlwimi %0,%1,8,16,23"
	    : "=r" (result)
	    : "r" (value), "0" (value >> 8));
	return result;
}

/* Swap the four bytes of a 32-bit value with three rotate-and-insert steps. */
static __inline__ __const__ __u32 ___arch__swab32(__u32 value)
{
	__u32 result;

	__asm__("rlwimi %0,%1,24,16,23\n\t"
	    "rlwimi %0,%1,8,8,15\n\t"
	    "rlwimi %0,%1,24,0,7"
	    : "=r" (result)
	    : "r" (value), "0" (value >> 24));
	return result;
}

#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)

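/*
 * How these hooks get used, as a sketch (assuming the generic
 * <linux/byteorder/swab.h> machinery pulled in via the include at the bottom
 * of this file): __swab16()/__swab32() expand to the __arch__swab* versions
 * above, so cpu_to_le32()/le32_to_cpu() end up as the rlwimi sequence rather
 * than generic shift-and-mask C code.  For example, ___arch__swab32(0x11223344)
 * yields 0x44332211.
 */
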
/* The same, but returns the converted value from the location pointed to by addr. */
#define __arch__swab16p(addr) ld_le16(addr)
#define __arch__swab32p(addr) ld_le32(addr)

/* The same, but do the conversion in situ, ie. put the value back to addr. */
#define __arch__swab16s(addr) st_le16(addr,*addr)
#define __arch__swab32s(addr) st_le32(addr,*addr)

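/*
 * In-place sketch (hypothetical pointer `p', for illustration): through the
 * generic wrappers, swab32s(p) reduces to st_le32(p, *p): a normal load of *p
 * followed by a byte-reversed stwbrx store, leaving the four bytes at p
 * reversed in memory.
 */
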
#ifndef __STRICT_ANSI__
#define __BYTEORDER_HAS_U64__
#define __SWAB_64_THRU_32__
#endif /* __STRICT_ANSI__ */

#endif /* __KERNEL__ */

#include <linux/byteorder/big_endian.h>

#endif /* _PPC_BYTEORDER_H */