#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
                          size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
                               gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
		if (!ptr)						\
			break;						\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		BUILD_BUG_ON(_n < 0);					\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)
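
/*
 * Whole-variable annotation: kmemcheck_annotate_variable() below simply
 * calls kmemcheck_mark_initialized() on the object, so every byte of "var"
 * is treated as initialized and later reads of it will not trigger
 * kmemcheck warnings. Illustrative sketch only; "struct foo" and its
 * "cookie" member are hypothetical and not part of this API:
 *
 *	struct foo *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj)
 *		kmemcheck_annotate_variable(obj->cookie);
 */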
#define kmemcheck_annotate_variable(var)				\
	do {								\
		kmemcheck_mark_initialized(&(var), sizeof(var));	\
	} while (0)

#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
                     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
                                       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
	unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
                                                    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
                                                      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
                                                    unsigned int n)
{
}

static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define kmemcheck_annotate_bitfield(ptr, name)	\
	do {					\
	} while (0)

#define kmemcheck_annotate_variable(var)	\
	do {					\
	} while (0)

#endif /* CONFIG_KMEMCHECK */

#endif /* LINUX_KMEMCHECK_H */