[linux-2.6.9-moxart.git] / mm / mlock.c
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/mm.h>
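
/*
 * Apply the new VM_LOCKED setting to the range [start, end) of this vma:
 * split the vma if the range does not cover it exactly, record the new
 * flags, fault the pages in when locking, and adjust the mm->locked_vm
 * accounting.  Called with mmap_sem held for writing.
 */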
static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct * mm = vma->vm_mm;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags)
		goto out;

	if (start != vma->vm_start) {
		if (split_vma(mm, vma, start, 1)) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (split_vma(mm, vma, end, 0)) {
			ret = -EAGAIN;
			goto out;
		}
	}

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		ret = make_pages_present(start, end);
	}

	vma->vm_mm->locked_vm -= pages;
out:
	return ret;
}
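
/*
 * Walk every vma overlapping [start, start + len) and set or clear
 * VM_LOCKED on each piece via mlock_fixup().  Returns 0 on success or a
 * negative errno: -EINVAL on address wrap, -ENOMEM if the range is not
 * fully covered by vmas, or the error from mlock_fixup().
 */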
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
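
/*
 * mlock(2): page-align the request, charge it against RLIMIT_MEMLOCK
 * (unless the caller has CAP_IPC_LOCK) and lock the range into memory.
 */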
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
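
/*
 * munlock(2): page-align the request and clear VM_LOCKED on the range.
 */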
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
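
/*
 * Back end for mlockall(2)/munlockall(2).  MCL_FUTURE sets VM_LOCKED in
 * mm->def_flags so future mappings are locked; otherwise every existing
 * vma is walked and VM_LOCKED is set (MCL_CURRENT) or cleared
 * (flags == 0, the munlockall case) on each one.
 */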
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
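
/*
 * mlockall(2): validate the flags and, when MCL_CURRENT is requested,
 * check the whole address space size (total_vm) against RLIMIT_MEMLOCK
 * unless the caller has CAP_IPC_LOCK, then apply the flags via
 * do_mlockall().
 */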
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
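
/*
 * munlockall(2): clear VM_LOCKED from mm->def_flags and from every vma.
 */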
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED;
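
/*
 * Charge the given size against the user's locked_shm allowance under
 * RLIMIT_MEMLOCK.  Returns 1 and takes a reference on the user_struct
 * when the lock is allowed, 0 otherwise.
 */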
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	spin_lock(&shmlock_user_lock);
	locked = size >> PAGE_SHIFT;
	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
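
/*
 * Undo user_shm_lock(): subtract the pages from locked_shm and drop the
 * reference taken on the user_struct.
 */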
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
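
/*
 * Illustrative userspace sketch (not part of this file): how the
 * sys_mlock()/sys_munlock() entry points above are normally reached
 * through the C library wrappers.  Buffer size and error handling are
 * invented for the example.
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 4096;
 *		char *buf = malloc(len);
 *
 *		if (buf && mlock(buf, len) == 0) {
 *			// pages are resident and VM_LOCKED is set on the vma
 *			munlock(buf, len);
 *		}
 *		free(buf);
 *		return 0;
 *	}
 */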