/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995  Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/syscalls.h>
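
/*
 * Apply the new VM_LOCKED setting to the range [start, end) within a
 * single vma, splitting the vma first if the range does not cover it
 * exactly.  The caller must hold mmap_sem in write mode.
 */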
static int mlock_fixup(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	int pages;
	int ret = 0;
	if (newflags == vma->vm_flags)
		goto out;
	if (start != vma->vm_start) {
		if (split_vma(mm, vma, start, 1)) {
			ret = -EAGAIN;
			goto out;
		}
	}
	if (end != vma->vm_end) {
		if (split_vma(mm, vma, end, 0)) {
			ret = -EAGAIN;
			goto out;
		}
	}
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;
	/*
	 * Keep track of amount of locked VM: pages is negated when
	 * locking, so the subtraction below grows locked_vm on mlock
	 * and shrinks it on munlock.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		ret = make_pages_present(start, end);
	}

	vma->vm_mm->locked_vm -= pages;
out:
	return ret;
}
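
/*
 * Walk the vmas covering [start, start + len) and apply mlock_fixup()
 * to each in turn.  Fails with -ENOMEM if there is a hole in the range.
 */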
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *next;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;
	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			/* Last vma covering the range: fix up to end and stop. */
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		/* This vma ends before end: fix it up whole and advance. */
		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			/* Hole in the address range. */
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
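
/*
 * mlock(2): round the range out to whole pages, charge it against
 * RLIMIT_MEMLOCK (bypassed with CAP_IPC_LOCK), then lock and fault in
 * the pages.
 */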
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
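
/*
 * munlock(2): clear VM_LOCKED over whole pages covering the range.
 * No limit check is needed for unlocking.
 */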
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
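
/*
 * Set or clear VM_LOCKED on every vma in the mm.  MCL_FUTURE is
 * recorded in mm->def_flags so that future mappings inherit it.
 */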
static int do_mlockall(int flags)
{
	struct vm_area_struct *vma;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;
	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
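
/*
 * mlockall(2): validate the flag mask, then check the whole address
 * space size against RLIMIT_MEMLOCK before locking everything.
 */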
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	down_write(&current->mm->mmap_sem);
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
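
/*
 * munlockall(2): unlock every vma and clear mm->def_flags so future
 * mappings are no longer locked by default.
 */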
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED;
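
/*
 * Charge size worth of pages against the user's RLIMIT_MEMLOCK quota
 * and pin a reference to the user_struct; returns 1 on success, 0 if
 * the limit would be exceeded and the caller lacks CAP_IPC_LOCK.
 */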
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	spin_lock(&shmlock_user_lock);
	locked = size >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
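
/*
 * Undo user_shm_lock(): return the pages to the user's quota and drop
 * the user_struct reference.
 */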
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}