Linux 2.1.89-4
[davej-history.git] / mm / mlock.c
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>

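/*
 * mlock only ever applies to whole vmas, so when a request covers just
 * part of one, the vma must be split and only the piece inside the
 * range gets the new flags.  The four helpers below handle the four
 * ways [start,end) can line up with a vma: covering all of it, its
 * start, its end, or a region strictly in the middle.
 */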
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
        vma->vm_flags = newflags;
        return 0;
}

static inline int mlock_fixup_start(struct vm_area_struct * vma,
        unsigned long end, int newflags)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        vma->vm_start = end;
        n->vm_end = end;
        vma->vm_offset += vma->vm_start - n->vm_start;
        n->vm_flags = newflags;
        if (n->vm_file)
                n->vm_file->f_count++;
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current->mm, n);
        return 0;
}

static inline int mlock_fixup_end(struct vm_area_struct * vma,
        unsigned long start, int newflags)
{
        struct vm_area_struct * n;

        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        vma->vm_end = start;
        n->vm_start = start;
        n->vm_offset += n->vm_start - vma->vm_start;
        n->vm_flags = newflags;
        if (n->vm_file)
                n->vm_file->f_count++;
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current->mm, n);
        return 0;
}

static inline int mlock_fixup_middle(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int newflags)
{
        struct vm_area_struct * left, * right;

        left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!left)
                return -EAGAIN;
        right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!right) {
                kmem_cache_free(vm_area_cachep, left);
                return -EAGAIN;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        vma->vm_start = start;
        vma->vm_end = end;
        right->vm_start = end;
        vma->vm_offset += vma->vm_start - left->vm_start;
        right->vm_offset += right->vm_start - left->vm_start;
        vma->vm_flags = newflags;
        if (vma->vm_file)
                vma->vm_file->f_count += 2;
        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        insert_vm_struct(current->mm, left);
        insert_vm_struct(current->mm, right);
        return 0;
}

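/*
 * Pick the right fixup for the way [start,end) overlaps the vma, then
 * update the mm's locked-page accounting to match.
 */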
static int mlock_fixup(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        int pages, retval;

        if (newflags == vma->vm_flags)
                return 0;

        if (start == vma->vm_start) {
                if (end == vma->vm_end)
                        retval = mlock_fixup_all(vma, newflags);
                else
                        retval = mlock_fixup_start(vma, end, newflags);
        } else {
                if (end == vma->vm_end)
                        retval = mlock_fixup_end(vma, start, newflags);
                else
                        retval = mlock_fixup_middle(vma, start, end, newflags);
        }
        if (!retval) {
                /* keep track of amount of locked VM */
                pages = (end - start) >> PAGE_SHIFT;
                if (!(newflags & VM_LOCKED))
                        pages = -pages;
                vma->vm_mm->locked_vm += pages;

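                /*
                 * Make the just-locked pages present by reading one byte
                 * per page; the empty asm marks 'c' as used so the
                 * compiler cannot optimize the read away.
                 */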
                if (newflags & VM_LOCKED)
                        while (start < end) {
                                char c;
                                get_user(c,(char *) start);
                                __asm__ __volatile__("": :"r" (c));
                                start += PAGE_SIZE;
                        }
        }
        return retval;
}

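/*
 * Walk every vma overlapping [start, start+len), turning VM_LOCKED on
 * or off as requested.  A hole in the range fails the whole call with
 * -ENOMEM; merge_segments() afterwards re-merges neighbours that ended
 * up with identical flags.
 */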
static int do_mlock(unsigned long start, size_t len, int on)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * next;
        int error;

        if (!suser())
                return -EPERM;
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = find_vma(current->mm, start);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vma->vm_flags | VM_LOCKED;
                if (!on)
                        newflags &= ~VM_LOCKED;

                if (vma->vm_end >= end) {
                        error = mlock_fixup(vma, nstart, end, newflags);
                        break;
                }

                tmp = vma->vm_end;
                next = vma->vm_next;
                error = mlock_fixup(vma, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
                }
        }
        merge_segments(current->mm, start, end);
        return error;
}

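/*
 * mlock(2) entry point.  From user space this is reached through the C
 * library wrapper; a minimal sketch of a caller (user-space code, not
 * part of this file) might look like:
 *
 *        char buf[8192];
 *        if (mlock(buf, sizeof(buf)) == 0) {
 *                ... buf is resident and pinned until munlock() ...
 *                munlock(buf, sizeof(buf));
 *        }
 *
 * Note that in this kernel only the superuser may lock memory.
 */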
asmlinkage int sys_mlock(unsigned long start, size_t len)
{
        unsigned long locked;
        unsigned long lock_limit;
        int error = -ENOMEM;

        lock_kernel();
        len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
        start &= PAGE_MASK;

        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        /* check against resource limits */
        if (locked > lock_limit)
                goto out;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (locked > num_physpages/2)
                goto out;

        error = do_mlock(start, len, 1);
out:
        unlock_kernel();
        return error;
}

asmlinkage int sys_munlock(unsigned long start, size_t len)
{
        int ret;

        lock_kernel();
        len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
        start &= PAGE_MASK;
        ret = do_mlock(start, len, 0);
        unlock_kernel();
        return ret;
}

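/*
 * Apply or clear VM_LOCKED across the whole address space.  MCL_CURRENT
 * locks everything mapped right now; MCL_FUTURE sets mm->def_flags so
 * that later mappings are created locked.  Calling with flags == 0
 * undoes both.
 */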
static int do_mlockall(int flags)
{
        int error;
        unsigned int def_flags;
        struct vm_area_struct * vma;

        if (!suser())
                return -EPERM;

        def_flags = 0;
        if (flags & MCL_FUTURE)
                def_flags = VM_LOCKED;
        current->mm->def_flags = def_flags;

        error = 0;
        for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
                unsigned int newflags;

                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
                        newflags &= ~VM_LOCKED;
                error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
                if (error)
                        break;
        }
        merge_segments(current->mm, 0, TASK_SIZE);
        return error;
}

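/*
 * mlockall(2) entry point.  A user-space sketch (not part of this
 * file): a real-time process might call
 *
 *        mlockall(MCL_CURRENT | MCL_FUTURE);
 *
 * to pin its entire address space, and munlockall() to release it.
 */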
asmlinkage int sys_mlockall(int flags)
{
        unsigned long lock_limit;
        int ret = -EINVAL;

        lock_kernel();
        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
                goto out;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        ret = -ENOMEM;
        if (current->mm->total_vm > lock_limit)
                goto out;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (current->mm->total_vm > num_physpages/2)
                goto out;

        ret = do_mlockall(flags);
out:
        unlock_kernel();
        return ret;
}

asmlinkage int sys_munlockall(void)
{
        int ret;

        lock_kernel();
        ret = do_mlockall(0);
        unlock_kernel();
        return ret;
}