#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE
/* Flags for get_mem_policy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
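
/*
 * Illustrative sketch only (not part of this header): how the MPOL_* modes
 * and the MPOL_F_* / MPOL_MF_* flags above are typically used from user
 * space through the mbind() and get_mempolicy() system call wrappers (for
 * example those provided by libnuma's <numaif.h>).  The mapping described
 * by 'addr'/'len' and the two-node mask are assumptions; error handling is
 * omitted.
 */
#if 0
static void example_userspace_use(void *addr, unsigned long len)
{
	unsigned long nodemask = 0x3;	/* nodes 0 and 1 */
	int mode;

	/* Interleave the mapping across nodes 0-1; MPOL_MF_STRICT makes the
	 * call fail if already-present pages violate the new policy. */
	mbind(addr, len, MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8,
	      MPOL_MF_STRICT);

	/* Ask which policy governs 'addr'; MPOL_F_ADDR selects the VMA policy
	 * rather than the process policy. */
	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR);
}
#endif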

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
struct vm_area_struct;

#ifdef CONFIG_NUMA
/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;			/* See MPOL_* above */
	union {
		struct zonelist	 *zonelist;	/* bind */
		short		  preferred_node; /* preferred */
		nodemask_t	  nodes;	/* interleave */
		/* undefined for default */
	} v;
};
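
/*
 * Illustrative sketch only: which member of the union above is meaningful
 * for each policy mode.  mpol_example_node() is a hypothetical helper used
 * for illustration, not part of the kernel API.
 */
#if 0
static int mpol_example_node(struct mempolicy *pol)
{
	switch (pol->policy) {
	case MPOL_PREFERRED:
		return pol->v.preferred_node;		/* allocate from this node first */
	case MPOL_INTERLEAVE:
		return first_node(pol->v.nodes);	/* spread pages over this node set */
	case MPOL_BIND:
		/* allocations are restricted to the zones listed in v.zonelist */
		return pol->v.zonelist->zones[0]->zone_pgdat->node_id;
	default:					/* MPOL_DEFAULT */
		return numa_node_id();			/* fall back to the local node */
	}
}
#endif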

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */
extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}
#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
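
/*
 * Illustrative sketch only: a typical policy lifecycle when duplicating a
 * VMA, using the helpers above.  The function name and the two VMAs are
 * assumptions; the real callers live in mm/ and are more involved.  When a
 * VMA is torn down, mpol_free(vma_policy(vma)) drops the reference again.
 */
#if 0
static int example_dup_vma_policy(struct vm_area_struct *old_vma,
				  struct vm_area_struct *new_vma)
{
	struct mempolicy *pol = mpol_copy(vma_policy(old_vma));

	if (IS_ERR(pol))
		return PTR_ERR(pol);	/* __mpol_copy() can fail with -ENOMEM */
	vma_set_policy(new_vma, pol);	/* new_vma now owns this reference */
	return 0;
}
#endif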

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */
struct sp_node {
	struct rb_node node;
	unsigned long start, end;
	struct mempolicy *policy;
};
struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};
static inline void mpol_shared_policy_init(struct shared_policy *info)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);
}
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr);

extern int policy_zone;

static inline void check_highest_zone(int k)
{
	if (k > policy_zone)
		policy_zone = k;
}

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)
static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}
struct shared_policy {};
static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}
#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)
static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void numa_policy_rebind(const nodemask_t *old,
				const nodemask_t *new)
{
}
static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}
static inline void check_highest_zone(int k)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */
#endif