/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <endian.h>
#include <byteswap.h>
#include <assert.h>
#include <stddef.h>
#include <linux/types.h>
#define get_cpu_var(p) (p)
#define __get_cpu_var(p) (p)
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define __GFP_BITS_SHIFT 20
#define __GFP_BITS_MASK ((int)((1 << __GFP_BITS_SHIFT) - 1))

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ULONG_MAX (~0UL)

#define BUG() assert(0)
#ifdef __CHECKER__
#define __force		__attribute__((force))
#define __bitwise__	__attribute__((bitwise))
#else
#define __force
#define __bitwise__
#endif
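/*
 * __force and __bitwise are annotations for the sparse static checker;
 * plain gcc does not understand these attributes, so outside of
 * __CHECKER__ builds they expand to nothing.
 */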
#ifdef __CHECKER__
/*
 * Since we're using primitive definitions from kernel-space, we need to
 * define __KERNEL__ so that system header files know which definitions
 * to use.
 */
#define __KERNEL__
#include <asm/types.h>
typedef __u32 u32;
typedef __u64 u64;
typedef __u16 u16;
typedef __u8 u8;
/*
 * Continuing to define __KERNEL__ breaks other parts of the code, so
 * we can just undefine it now that we have the correct headers...
 */
#undef __KERNEL__
#else
typedef unsigned int u32;
typedef unsigned int __u32;
typedef unsigned long long u64;
typedef unsigned char u8;
typedef unsigned short u16;
#endif
struct vma_shared { int prio_tree_node; };
struct vm_area_struct {
	unsigned long vm_pgoff;
	unsigned long vm_start;
	unsigned long vm_end;
	struct vma_shared shared;
};
struct mutex {
	unsigned long lock;
};

#define mutex_init(m)						\
do {								\
	(m)->lock = 1;						\
} while (0)

static inline void mutex_lock(struct mutex *m)
{
	m->lock--;
}

static inline void mutex_unlock(struct mutex *m)
{
	m->lock++;
}

static inline int mutex_is_locked(struct mutex *m)
{
	return (m->lock != 1);
}
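/*
 * Illustrative sketch (not from the original header): these stubs only
 * track lock depth for single-threaded userspace code, e.g.
 *
 *	struct mutex m;
 *	mutex_init(&m);		// m.lock == 1, mutex_is_locked() == 0
 *	mutex_lock(&m);		// m.lock == 0, mutex_is_locked() != 0
 *	mutex_unlock(&m);	// back to unlocked
 */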
#define cond_resched() do { } while (0)
#define preempt_enable() do { } while (0)
#define preempt_disable() do { } while (0)
#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
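/*
 * Worked example (assuming a 64-bit long): bit 70 lives in word
 * 70 / 64 == 1 under mask 1UL << (70 % 64) == 1UL << 6, so
 * BITOP_WORD(70) == 1 and BITOP_MASK(70) == 0x40.
 */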
#ifndef __attribute_const__
#define __attribute_const__ __attribute__((__const__))
#endif
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	*p |= mask;
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	*p &= ~mask;
}
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
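/*
 * Usage sketch (hypothetical): a small bitmap on the stack.
 *
 *	unsigned long map[2] = { 0, 0 };	// 128 bits on 64-bit
 *	__set_bit(70, map);			// map[1] |= 0x40
 *	assert(test_bit(70, map));
 *	__clear_bit(70, map);
 *	assert(!test_bit(70, map));
 */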
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
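/*
 * This reuses the kernel convention of packing negative errno values
 * into the top MAX_ERRNO addresses of pointer space, which no valid
 * pointer can occupy. Illustrative round trip:
 *
 *	void *p = ERR_PTR(-ENOMEM);	// p == (void *)-12
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);	// yields -ENOMEM again
 */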
#define min(x,y) ({				\
	typeof(x) _x = (x);			\
	typeof(y) _y = (y);			\
	(void) (&_x == &_y);			\
	_x < _y ? _x : _y; })

#define max(x,y) ({				\
	typeof(x) _x = (x);			\
	typeof(y) _y = (y);			\
	(void) (&_x == &_y);			\
	_x > _y ? _x : _y; })
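/*
 * The (void) (&_x == &_y) line is the usual kernel trick: the pointer
 * comparison does nothing at runtime, but makes the compiler warn when
 * x and y have different (pointer-incompatible) types.
 */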
#define min_t(type,x,y) \
	({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
#define max_t(type,x,y) \
	({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
/*
 * This looks more complex than it should be. But we need to
 * get the type for the ~ right in round_down (it needs to be
 * as wide as the result!), and we want to evaluate the macro
 * arguments just once each.
 */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
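/*
 * Both macros require y to be a power of two. Worked example with
 * x = 9, y = 8: __round_mask is 7, so round_up(9, 8) == ((8 | 7) + 1)
 * == 16 and round_down(9, 8) == (9 & ~7) == 8.
 */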
#define printk(fmt, args...) fprintf(stderr, fmt, ##args)

#define kmalloc(x, y) malloc(x)
#define kzalloc(x, y) calloc(1, x)
#define kstrdup(x, y) strdup(x)
#define kfree(x) free(x)

#define BUG_ON(c) assert(!(c))
#define WARN_ON(c) assert(!(c))
#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
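/*
 * Usage sketch (hypothetical types): recover the outer struct from a
 * pointer to one of its members.
 *
 *	struct item { int key; struct vma_shared shared; };
 *	struct vma_shared *s = ...;
 *	struct item *it = container_of(s, struct item, shared);
 */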
#define __bitwise __bitwise__

typedef u16 __bitwise __le16;
typedef u16 __bitwise __be16;
typedef u32 __bitwise __le32;
typedef u32 __bitwise __be32;
typedef u64 __bitwise __le64;
typedef u64 __bitwise __be64;
/* Macros to generate set/get funcs for the struct fields
 * assume there is a lefoo_to_cpu for every type, so let's make a simple
 * one for u8:
 */
#define le8_to_cpu(v) (v)
#define cpu_to_le8(v) (v)
#if __BYTE_ORDER == __BIG_ENDIAN
#define cpu_to_le64(x) ((__force __le64)(u64)(bswap_64(x)))
#define le64_to_cpu(x) ((__force u64)(__le64)(bswap_64(x)))
#define cpu_to_le32(x) ((__force __le32)(u32)(bswap_32(x)))
#define le32_to_cpu(x) ((__force u32)(__le32)(bswap_32(x)))
#define cpu_to_le16(x) ((__force __le16)(u16)(bswap_16(x)))
#define le16_to_cpu(x) ((__force u16)(__le16)(bswap_16(x)))
#else
#define cpu_to_le64(x) ((__force __le64)(u64)(x))
#define le64_to_cpu(x) ((__force u64)(__le64)(x))
#define cpu_to_le32(x) ((__force __le32)(u32)(x))
#define le32_to_cpu(x) ((__force u32)(__le32)(x))
#define cpu_to_le16(x) ((__force __le16)(u16)(x))
#define le16_to_cpu(x) ((__force u16)(__le16)(x))
#endif
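/*
 * btrfs keeps its on-disk metadata little-endian, so on big-endian
 * hosts each cpu_to_leNN/leNN_to_cpu pair byte-swaps, while on
 * little-endian hosts they compile away to plain casts; e.g.
 * cpu_to_le32(0x11223344) yields the bytes 44 33 22 11 on disk.
 */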
struct __una_u16 { __le16 x; } __attribute__((__packed__));
struct __una_u32 { __le32 x; } __attribute__((__packed__));
struct __una_u64 { __le64 x; } __attribute__((__packed__));
#define get_unaligned_le8(p) (*((u8 *)(p)))
#define put_unaligned_le8(val,p) ((*((u8 *)(p))) = (val))
#define get_unaligned_le16(p) le16_to_cpu(((const struct __una_u16 *)(p))->x)
#define put_unaligned_le16(val,p) (((struct __una_u16 *)(p))->x = cpu_to_le16(val))
#define get_unaligned_le32(p) le32_to_cpu(((const struct __una_u32 *)(p))->x)
#define put_unaligned_le32(val,p) (((struct __una_u32 *)(p))->x = cpu_to_le32(val))
#define get_unaligned_le64(p) le64_to_cpu(((const struct __una_u64 *)(p))->x)
#define put_unaligned_le64(val,p) (((struct __una_u64 *)(p))->x = cpu_to_le64(val))
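/*
 * The __una_* wrappers above exploit __attribute__((__packed__)):
 * accessing the member through a packed struct tells the compiler the
 * load or store may be unaligned, so it emits safe access sequences on
 * architectures that trap on misaligned memory operations.
 */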