/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
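
/*
 * mlock_fixup() below dispatches to one of four helpers, depending on
 * how the byte range overlaps the vma: the whole vma (fixup_all), a
 * prefix (fixup_start), a suffix (fixup_end), or a span in the middle
 * (fixup_middle).  The last three split the vma so that only the
 * requested range ends up with the new flags.
 */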
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
	vmlist_modify_lock(vma->vm_mm);
	vma->vm_flags = newflags;
	vmlist_modify_unlock(vma->vm_mm);
	return 0;
}

static inline int mlock_fixup_start(struct vm_area_struct * vma,
	unsigned long end, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	n->vm_end = end;
	n->vm_flags = newflags;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vmlist_modify_lock(vma->vm_mm);
	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_start = end;
	insert_vm_struct(current->mm, n);
	vmlist_modify_unlock(vma->vm_mm);
	return 0;
}

static inline int mlock_fixup_end(struct vm_area_struct * vma,
	unsigned long start, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	n->vm_start = start;
	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
	n->vm_flags = newflags;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vmlist_modify_lock(vma->vm_mm);
	vma->vm_end = start;
	insert_vm_struct(current->mm, n);
	vmlist_modify_unlock(vma->vm_mm);
	return 0;
}

static inline int mlock_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int newflags)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -EAGAIN;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -EAGAIN;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	right->vm_start = end;
	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
	vma->vm_flags = newflags;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);

	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	vmlist_modify_lock(vma->vm_mm);
	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_flags = newflags;
	insert_vm_struct(current->mm, left);
	insert_vm_struct(current->mm, right);
	vmlist_modify_unlock(vma->vm_mm);
	return 0;
}
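
/*
 * Split/adjust the vma so that exactly [start, end) carries the new
 * flags, then update the locked-page accounting.  Locking a range
 * also faults its pages in via make_pages_present().
 */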
static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int pages, retval;

	if (newflags == vma->vm_flags)
		return 0;

	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			retval = mlock_fixup_all(vma, newflags);
		else
			retval = mlock_fixup_start(vma, end, newflags);
	} else {
		if (end == vma->vm_end)
			retval = mlock_fixup_end(vma, start, newflags);
		else
			retval = mlock_fixup_middle(vma, start, end, newflags);
	}
	if (!retval) {
		/* keep track of amount of locked VM */
		pages = (end - start) >> PAGE_SHIFT;
		if (newflags & VM_LOCKED) {
			pages = -pages;
			make_pages_present(start, end);
		}
		vma->vm_mm->locked_vm -= pages;
	}
	return retval;
}
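
/*
 * Walk all vmas covering [start, start+len) and give each piece the
 * new VM_LOCKED setting.  The range must be mapped with no holes,
 * otherwise we fail with -ENOMEM.  Adjacent vmas that end up with
 * identical flags are merged back together afterwards.
 */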
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (on && !capable(CAP_IPC_LOCK))
		return -EPERM;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	vmlist_modify_lock(current->mm);
	merge_segments(current->mm, start, end);
	vmlist_modify_unlock(current->mm);
	return error;
}
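
/*
 * mlock(2): page-align the range, check it against RLIMIT_MEMLOCK and
 * against half of physical memory, then hand it to do_mlock().  The
 * CAP_IPC_LOCK capability check happens inside do_mlock().
 */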
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	down(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if (locked > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (locked > num_physpages/2)
		goto out;

	error = do_mlock(start, len, 1);
out:
	up(&current->mm->mmap_sem);
	return error;
}

asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up(&current->mm->mmap_sem);
	return ret;
}
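
/*
 * MCL_CURRENT locks everything that is currently mapped; MCL_FUTURE
 * makes new mappings start out locked via mm->def_flags.  Calling
 * this with flags == 0 undoes both, which is how munlockall() works.
 */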
static int do_mlockall(int flags)
{
	int error;
	unsigned int def_flags;
	struct vm_area_struct * vma;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;

	def_flags = 0;
	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;

	error = 0;
	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;
		error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
		if (error)
			break;
	}
	vmlist_modify_lock(current->mm);
	merge_segments(current->mm, 0, TASK_SIZE);
	vmlist_modify_unlock(current->mm);
	return error;
}
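
/*
 * mlockall(2): validate the flags and check the whole address space
 * against RLIMIT_MEMLOCK and half of physical memory before calling
 * do_mlockall().
 */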
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	down(&current->mm->mmap_sem);
	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (current->mm->total_vm > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (current->mm->total_vm > num_physpages/2)
		goto out;

	ret = do_mlockall(flags);
out:
	up(&current->mm->mmap_sem);
	return ret;
}
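
/*
 * munlockall(2): clear VM_LOCKED on every mapping and reset the
 * default flags for future mappings.
 */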
asmlinkage long sys_munlockall(void)
{
	int ret;

	down(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up(&current->mm->mmap_sem);
	return ret;
}