32 bit compile fixes
[btrfs-progs-unstable.git] / kerncompat.h
#ifndef __KERNCOMPAT
#define __KERNCOMPAT

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <endian.h>
#include <byteswap.h>
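
/*
 * Userspace stand-ins for kernel interfaces: gfp_t and the GFP_* flags
 * collapse to plain ints, the per-cpu accessors become identity macros,
 * and BUG() simply aborts the process.
 */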
#define gfp_t int
#define get_cpu_var(p) (p)
#define __get_cpu_var(p) (p)
#define BITS_PER_LONG (sizeof(long) * 8)
#define __GFP_BITS_SHIFT 20
#define __GFP_BITS_MASK ((int)((1 << __GFP_BITS_SHIFT) - 1))
#define GFP_KERNEL 0
#define GFP_NOFS 0
#define __read_mostly
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define ULONG_MAX (~0UL)
#define BUG() abort()
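
/*
 * __force and __bitwise__ are annotations for the sparse static checker;
 * they expand to nothing under a regular compiler.
 */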
#ifdef __CHECKER__
#define __force __attribute__((force))
#define __bitwise__ __attribute__((bitwise))
#else
#define __force
#define __bitwise__
#endif
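
/*
 * Kernel-style fixed-width integer types. Outside of sparse they come
 * from <asm/types.h>; under sparse they are defined directly.
 */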
#ifndef __CHECKER__
#include <asm/types.h>
typedef __u32 u32;
typedef __u64 u64;
typedef __u16 u16;
typedef __u8 u8;
#else
typedef unsigned int u32;
typedef unsigned int __u32;
typedef unsigned long long u64;
typedef unsigned char u8;
typedef unsigned short u16;
#endif
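
/*
 * Minimal userspace stand-ins for the kernel's vm_area_struct and
 * struct page, carrying only the fields used here.
 */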
struct vma_shared { int prio_tree_node; };
struct vm_area_struct {
        unsigned long vm_pgoff;
        unsigned long vm_start;
        unsigned long vm_end;
        struct vma_shared shared;
};

struct page {
        unsigned long index;
};
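
/*
 * Preemption control is meaningless in userspace, so these are no-ops.
 */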
static inline void preempt_enable(void) { do {; } while(0);}
static inline void preempt_disable(void) { do {; } while(0);}
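
/*
 * Non-atomic bit operations modeled on the kernel's generic bitops:
 * BITOP_WORD() picks the long that holds bit nr, BITOP_MASK() the bit
 * within that long.
 */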
#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BITOP_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

        *p |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BITOP_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

        *p &= ~mask;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
        return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
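
/*
 * Illustrative use (not part of the original header): the bitops operate
 * on an array of unsigned long, e.g.
 *
 *      unsigned long map[2] = { 0, 0 };
 *
 *      __set_bit(3, map);
 *      __set_bit(BITS_PER_LONG + 1, map);      // lands in map[1]
 *      if (test_bit(3, map))
 *              __clear_bit(3, map);
 */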
#define BUG_ON(c) do { if (c) abort(); } while (0)
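
/*
 * container_of() - given a pointer to a member, recover a pointer to the
 * structure that contains it.
 */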
#define container_of(ptr, type, member) ({ \
        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
        (type *)( (char *)__mptr - __builtin_offsetof(type,member) );})
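
/*
 * Note: these are local placeholder values and do not match the ENOMEM
 * and EEXIST constants in <errno.h>.
 */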
#define ENOMEM 5
#define EEXIST 6

#ifdef __CHECKER__
#define __CHECK_ENDIAN__
#define __bitwise __bitwise__
#else
#define __bitwise
#endif

typedef u16 __bitwise __le16;
typedef u16 __bitwise __be16;
typedef u32 __bitwise __le32;
typedef u32 __bitwise __be32;
typedef u64 __bitwise __le64;
typedef u64 __bitwise __be64;
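
/*
 * Little-endian conversion helpers: on big-endian hosts the cpu_to_le*()
 * and le*_to_cpu() macros byte-swap, on little-endian hosts they are
 * plain casts.
 */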
#if __BYTE_ORDER == __BIG_ENDIAN
#define cpu_to_le64(x) ((__force __le64)(u64)(bswap_64(x)))
#define le64_to_cpu(x) ((__force u64)(__le64)(bswap_64(x)))
#define cpu_to_le32(x) ((__force __le32)(u32)(bswap_32(x)))
#define le32_to_cpu(x) ((__force u32)(__le32)(bswap_32(x)))
#define cpu_to_le16(x) ((__force __le16)(u16)(bswap_16(x)))
#define le16_to_cpu(x) ((__force u16)(__le16)(bswap_16(x)))
#else
#define cpu_to_le64(x) ((__force __le64)(u64)(x))
#define le64_to_cpu(x) ((__force u64)(__le64)(x))
#define cpu_to_le32(x) ((__force __le32)(u32)(x))
#define le32_to_cpu(x) ((__force u32)(__le32)(x))
#define cpu_to_le16(x) ((__force __le16)(u16)(x))
#define le16_to_cpu(x) ((__force u16)(__le16)(x))
#endif

#endif