/*
 *  linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

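/*
 * The flag change covers the whole vma, so no split is needed:
 * just rewrite the flags in place.
 */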
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
        vma->vm_flags = newflags;
        return 0;
}

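/*
 * The region to change starts at vma->vm_start but ends before
 * vma->vm_end: split off the head into a new vma 'n' carrying the
 * new flags, and shrink the original vma to begin at 'end'.
 */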
static inline int mlock_fixup_start(struct vm_area_struct * vma,
        unsigned long end, int newflags)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        vma->vm_start = end;
        n->vm_end = end;
        vma->vm_offset += vma->vm_start - n->vm_start;
        n->vm_flags = newflags;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current->mm, n);
        return 0;
}

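/*
 * The region to change ends at vma->vm_end but starts after
 * vma->vm_start: split off the tail into a new vma 'n' carrying
 * the new flags, and trim the original vma to end at 'start'.
 */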
static inline int mlock_fixup_end(struct vm_area_struct * vma,
        unsigned long start, int newflags)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        vma->vm_end = start;
        n->vm_start = start;
        n->vm_offset += n->vm_start - vma->vm_start;
        n->vm_flags = newflags;
        if (n->vm_file)
                get_file(n->vm_file);
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current->mm, n);
        return 0;
}

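/*
 * The region to change sits strictly inside the vma: split into
 * three pieces.  'left' and 'right' keep the old flags, while the
 * original vma is narrowed to [start, end) and gets the new flags.
 * Two extra references are taken on any backing file, one for each
 * new vma.
 */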
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int newflags)
{
        struct vm_area_struct * left, * right;

        left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!left)
                return -EAGAIN;
        right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!right) {
                kmem_cache_free(vm_area_cachep, left);
                return -EAGAIN;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        vma->vm_start = start;
        vma->vm_end = end;
        right->vm_start = end;
        vma->vm_offset += vma->vm_start - left->vm_start;
        right->vm_offset += right->vm_start - left->vm_start;
        vma->vm_flags = newflags;
        if (vma->vm_file)
                atomic_add(2, &vma->vm_file->f_count);

        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        insert_vm_struct(current->mm, left);
        insert_vm_struct(current->mm, right);
        return 0;
}

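/*
 * Dispatch to the right fixup helper depending on how the region
 * [start, end) lines up with the vma, then update the mm's count
 * of locked pages.  Newly locked pages are faulted in right away
 * via make_pages_present().
 */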
static int mlock_fixup(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        int pages, retval;

        if (newflags == vma->vm_flags)
                return 0;

        if (start == vma->vm_start) {
                if (end == vma->vm_end)
                        retval = mlock_fixup_all(vma, newflags);
                else
                        retval = mlock_fixup_start(vma, end, newflags);
        } else {
                if (end == vma->vm_end)
                        retval = mlock_fixup_end(vma, start, newflags);
                else
                        retval = mlock_fixup_middle(vma, start, end, newflags);
        }
        if (!retval) {
                /* keep track of amount of locked VM */
                pages = (end - start) >> PAGE_SHIFT;
                if (newflags & VM_LOCKED) {
                        pages = -pages;
                        make_pages_present(start, end);
                }
                vma->vm_mm->locked_vm -= pages;
        }
        return retval;
}

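/*
 * Walk the contiguous run of vmas covering [start, start+len) and
 * set or clear VM_LOCKED on each.  The range must be fully mapped,
 * otherwise -ENOMEM is returned.  Adjacent vmas that end up with
 * identical flags are coalesced again by merge_segments().
 */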
static int do_mlock(unsigned long start, size_t len, int on)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * next;
        int error;

        if (!capable(CAP_IPC_LOCK))
                return -EPERM;
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = find_vma(current->mm, start);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vma->vm_flags | VM_LOCKED;
                if (!on)
                        newflags &= ~VM_LOCKED;

                if (vma->vm_end >= end) {
                        error = mlock_fixup(vma, nstart, end, newflags);
                        break;
                }

                tmp = vma->vm_end;
                next = vma->vm_next;
                error = mlock_fixup(vma, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
                }
        }
        merge_segments(current->mm, start, end);
        return error;
}

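/*
 * mlock(2): lock a range of the calling process's address space in
 * memory.  The range is page-aligned and checked against the
 * RLIMIT_MEMLOCK resource limit before the real work is done.
 */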
asmlinkage int sys_mlock(unsigned long start, size_t len)
{
        unsigned long locked;
        unsigned long lock_limit;
        int error = -ENOMEM;

        down(&current->mm->mmap_sem);
        len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
        start &= PAGE_MASK;

        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        /* check against resource limits */
        if (locked > lock_limit)
                goto out;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (locked > num_physpages/2)
                goto out;

        error = do_mlock(start, len, 1);
out:
        up(&current->mm->mmap_sem);
        return error;
}

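/*
 * munlock(2): undo mlock() on a page-aligned range.  No limit
 * checks are needed for unlocking.
 */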
asmlinkage int sys_munlock(unsigned long start, size_t len)
{
        int ret;

        down(&current->mm->mmap_sem);
        len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
        start &= PAGE_MASK;
        ret = do_mlock(start, len, 0);
        up(&current->mm->mmap_sem);
        return ret;
}

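/*
 * Apply or clear VM_LOCKED across every vma in the current mm.
 * MCL_FUTURE is handled by setting mm->def_flags so that future
 * mappings are created locked; MCL_CURRENT locks what is mapped now.
 */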
static int do_mlockall(int flags)
{
        int error;
        unsigned int def_flags;
        struct vm_area_struct * vma;

        if (!capable(CAP_IPC_LOCK))
                return -EPERM;

        def_flags = 0;
        if (flags & MCL_FUTURE)
                def_flags = VM_LOCKED;
        current->mm->def_flags = def_flags;

        error = 0;
        for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
                unsigned int newflags;

                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
                        newflags &= ~VM_LOCKED;
                error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
                if (error)
                        break;
        }
        merge_segments(current->mm, 0, TASK_SIZE);
        return error;
}

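/*
 * mlockall(2): lock the whole address space, current and/or future
 * mappings, subject to the same RLIMIT_MEMLOCK and physical-memory
 * sanity checks as mlock().
 */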
asmlinkage int sys_mlockall(int flags)
{
        unsigned long lock_limit;
        int ret = -EINVAL;

        down(&current->mm->mmap_sem);
        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
                goto out;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        ret = -ENOMEM;
        if (current->mm->total_vm > lock_limit)
                goto out;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (current->mm->total_vm > num_physpages/2)
                goto out;

        ret = do_mlockall(flags);
out:
        up(&current->mm->mmap_sem);
        return ret;
}

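/*
 * munlockall(2): drop all locks by running do_mlockall() with no
 * flags set, which also clears VM_LOCKED from mm->def_flags.
 */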
asmlinkage int sys_munlockall(void)
{
        int ret;

        down(&current->mm->mmap_sem);
        ret = do_mlockall(0);
        up(&current->mm->mmap_sem);
        return ret;
}