Clean up syscall code to look more like its mips64 equivalent.
[linux-2.6/linux-mips.git] / mm / mlock.c
blob 21c8fbfa53a5e9cc811968c24205d85acc1f3c07
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/mm.h>
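
/*
 * mlock_fixup - set or clear VM_LOCKED on the range [start, end) of a
 * single vma, splitting the vma at start and/or end first so that only
 * the requested range is affected, and adjust the mm's locked_vm
 * accounting.  Pages are faulted in when locking.
 */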
static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct * mm = vma->vm_mm;
	int pages, error;

	if (newflags == vma->vm_flags)
		return 0;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			return -EAGAIN;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			return -EAGAIN;
	}

	spin_lock(&mm->page_table_lock);
	vma->vm_flags = newflags;
	spin_unlock(&mm->page_table_lock);

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		make_pages_present(start, end);
	}

	vma->vm_mm->locked_vm -= pages;
	return 0;
}
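
/*
 * do_mlock - walk the vmas covering [start, start + len) and apply or
 * clear VM_LOCKED on each through mlock_fixup().  Locking requires
 * CAP_IPC_LOCK; a hole in the address range yields -ENOMEM.
 */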
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (on && !capable(CAP_IPC_LOCK))
		return -EPERM;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
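
/*
 * sys_mlock - page-align the request, check the would-be locked total
 * against RLIMIT_MEMLOCK, and lock the range with mmap_sem held for
 * writing.
 */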
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if (locked <= lock_limit)
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
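
/*
 * sys_munlock - page-align the request and clear VM_LOCKED on it; no
 * resource limit applies to unlocking.
 */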
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
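
/*
 * do_mlockall - set mm->def_flags so future mappings honour MCL_FUTURE,
 * then walk every vma and apply or clear VM_LOCKED according to
 * MCL_CURRENT.
 */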
static int do_mlockall(int flags)
{
	int error;
	unsigned int def_flags;
	struct vm_area_struct * vma;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;

	def_flags = 0;
	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;

	error = 0;
	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;
		error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
		if (error)
			break;
	}
	return error;
}
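
/*
 * sys_mlockall - validate the MCL_* flags and check the process's
 * total VM against RLIMIT_MEMLOCK before locking everything.
 */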
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	down_write(&current->mm->mmap_sem);
	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (current->mm->total_vm <= lock_limit)
		ret = do_mlockall(flags);
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}
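
/*
 * sys_munlockall - undo both MCL_CURRENT and MCL_FUTURE by calling
 * do_mlockall() with no flags set.
 */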
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
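
For context, a minimal userspace sketch that exercises the sys_mlock/sys_munlock paths above. This is a hypothetical standalone test program, not part of this file; note that in this kernel version do_mlock() demands CAP_IPC_LOCK, so it must run with that capability (e.g. as root).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = sysconf(_SC_PAGESIZE);	/* one page */
	void *buf;

	/* Page-aligned buffer, so the PAGE_MASK rounding in sys_mlock()
	 * covers exactly one page. */
	if (posix_memalign(&buf, len, len) != 0)
		return 1;

	memset(buf, 0, len);			/* touch the page */
	if (mlock(buf, len) != 0) {		/* enters sys_mlock() */
		perror("mlock");
		return 1;
	}
	if (munlock(buf, len) != 0) {		/* enters sys_munlock() */
		perror("munlock");
		return 1;
	}
	free(buf);
	return 0;
}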