/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
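
/*
 * The lock/unlock range covers the whole VMA: just change the flags
 * in place, under the vmlist lock.
 */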
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
	vmlist_modify_lock(vma->vm_mm);
	vma->vm_flags = newflags;
	vmlist_modify_unlock(vma->vm_mm);
	return 0;
}
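
/*
 * The range starts at vm_start but ends inside the VMA: split off a
 * new VMA for [vm_start, end) carrying the new flags, and shrink the
 * original VMA so that it begins at 'end'.
 */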
static inline int mlock_fixup_start(struct vm_area_struct * vma,
	unsigned long end, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	n->vm_end = end;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vmlist_modify_lock(vma->vm_mm);
	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_start = end;
	insert_vm_struct(current->mm, n);
	vmlist_modify_unlock(vma->vm_mm);
	return 0;
}
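
/*
 * The mirror case: the range ends at vm_end but starts inside the
 * VMA. The new VMA covers [start, vm_end) with the new flags; the
 * original keeps [vm_start, start).
 */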
static inline int mlock_fixup_end(struct vm_area_struct * vma,
	unsigned long start, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	n->vm_start = start;
	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vmlist_modify_lock(vma->vm_mm);
	vma->vm_end = start;
	insert_vm_struct(current->mm, n);
	vmlist_modify_unlock(vma->vm_mm);
	return 0;
}
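
/*
 * The range lies strictly inside the VMA: split into three pieces.
 * The original VMA becomes the middle piece with the new flags, while
 * the left and right pieces keep the old flags. Both new pieces still
 * reference the backing file, hence the f_count bump of two.
 */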
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int newflags)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -EAGAIN;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -EAGAIN;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	right->vm_start = end;
	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
	vma->vm_flags = newflags;
	left->vm_raend = 0;
	right->vm_raend = 0;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);

	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	vmlist_modify_lock(vma->vm_mm);
	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_flags = newflags;
	vma->vm_raend = 0;
	insert_vm_struct(current->mm, left);
	insert_vm_struct(current->mm, right);
	vmlist_modify_unlock(vma->vm_mm);
	return 0;
}
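
/*
 * Pick the appropriate split routine for how [start, end) overlaps
 * the VMA, then update mm->locked_vm and, when locking, fault the
 * pages in right away.
 */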
static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int pages, retval;

	if (newflags == vma->vm_flags)
		return 0;

	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			retval = mlock_fixup_all(vma, newflags);
		else
			retval = mlock_fixup_start(vma, end, newflags);
	} else {
		if (end == vma->vm_end)
			retval = mlock_fixup_end(vma, start, newflags);
		else
			retval = mlock_fixup_middle(vma, start, end, newflags);
	}
	if (!retval) {
		/* keep track of amount of locked VM */
		pages = (end - start) >> PAGE_SHIFT;
		if (newflags & VM_LOCKED) {
			pages = -pages;
			make_pages_present(start, end);
		}
		vma->vm_mm->locked_vm -= pages;
	}
	return retval;
}
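
/*
 * Apply or clear VM_LOCKED over [start, start+len). The range must be
 * fully covered by VMAs with no holes, otherwise -ENOMEM; adjacent
 * segments are re-merged once all the fixups are done.
 */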
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (on && !capable(CAP_IPC_LOCK))
		return -EPERM;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	vmlist_modify_lock(current->mm);
	merge_segments(current->mm, start, end);
	vmlist_modify_unlock(current->mm);
	return error;
}
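
/*
 * mlock(2): page-align the request, enforce RLIMIT_MEMLOCK and the
 * half-of-physical-memory heuristic, then lock the range. The caller
 * also needs CAP_IPC_LOCK (checked in do_mlock).
 */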
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	down(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if (locked > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (locked > num_physpages/2)
		goto out;

	error = do_mlock(start, len, 1);
out:
	up(&current->mm->mmap_sem);
	return error;
}
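
/*
 * munlock(2): no limit checks are needed to unlock.
 */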
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up(&current->mm->mmap_sem);
	return ret;
}
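
/*
 * MCL_FUTURE is handled by setting mm->def_flags so that new mappings
 * inherit VM_LOCKED; MCL_CURRENT walks every existing VMA. Calling
 * this with flags == 0 undoes both, which is how munlockall() works.
 */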
static int do_mlockall(int flags)
{
	int error;
	unsigned int def_flags;
	struct vm_area_struct * vma;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;

	def_flags = 0;
	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;

	error = 0;
	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;
		error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
		if (error)
			break;
	}
	vmlist_modify_lock(current->mm);
	merge_segments(current->mm, 0, TASK_SIZE);
	vmlist_modify_unlock(current->mm);
	return error;
}
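
/*
 * mlockall(2): validate the flag mask and check the limits against
 * the entire address space before locking everything.
 */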
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	down(&current->mm->mmap_sem);
	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (current->mm->total_vm > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (current->mm->total_vm > num_physpages/2)
		goto out;

	ret = do_mlockall(flags);
out:
	up(&current->mm->mmap_sem);
	return ret;
}
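
/*
 * munlockall(2): clear VM_LOCKED on every VMA and reset def_flags.
 */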
asmlinkage long sys_munlockall(void)
{
	int ret;

	down(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up(&current->mm->mmap_sem);
	return ret;
}
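
/*
 * For reference, a minimal user-space sketch (not part of this kernel
 * file) of how these syscalls are normally reached through the libc
 * wrappers. The buffer and its size are illustrative only; note that
 * in this kernel, mlock() requires CAP_IPC_LOCK, so the program below
 * needs the appropriate privilege to succeed:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 4096;
 *		char *buf = malloc(len);
 *
 *		if (!buf)
 *			return 1;
 *		if (mlock(buf, len) != 0)
 *			perror("mlock");
 *		if (munlock(buf, len) != 0)
 *			perror("munlock");
 *		free(buf);
 *		return 0;
 *	}
 */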