/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
	vma->vm_flags = newflags;
	return 0;
}
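/*
 * The helpers below handle the case where only part of a vma changes its
 * VM_LOCKED state.  mlock_fixup_start() and mlock_fixup_end() carve off a
 * new vma for the head or tail that gets the new flags; mlock_fixup_middle()
 * carves off an unchanged vma on each side and applies the new flags to the
 * original vma in the middle.
 */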
static inline int mlock_fixup_start(struct vm_area_struct * vma,
	unsigned long end, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	vma->vm_start = end;
	n->vm_end = end;
	vma->vm_offset += vma->vm_start - n->vm_start;
	n->vm_flags = newflags;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
static inline int mlock_fixup_end(struct vm_area_struct * vma,
	unsigned long start, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	n->vm_offset += n->vm_start - vma->vm_start;
	n->vm_flags = newflags;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
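/*
 * Splitting a file-backed mapping leaves each resulting vma holding its own
 * reference to the same struct file, hence the f_count adjustments: one
 * extra reference in the start/end cases above, two in the middle case below.
 */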
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int newflags)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -EAGAIN;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -EAGAIN;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);

	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	insert_vm_struct(current->mm, left);
	insert_vm_struct(current->mm, right);
	return 0;
}
static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int pages, retval;

	if (newflags == vma->vm_flags)
		return 0;

	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			retval = mlock_fixup_all(vma, newflags);
		else
			retval = mlock_fixup_start(vma, end, newflags);
	} else {
		if (end == vma->vm_end)
			retval = mlock_fixup_end(vma, start, newflags);
		else
			retval = mlock_fixup_middle(vma, start, end, newflags);
	}
	if (!retval) {
		/* keep track of amount of locked VM */
		pages = (end - start) >> PAGE_SHIFT;
		if (newflags & VM_LOCKED) {
			pages = -pages;
			make_pages_present(start, end);
		}
		vma->vm_mm->locked_vm -= pages;
	}
	return retval;
}
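/*
 * do_mlock() walks the vma list covering [start, start + len) and applies
 * mlock_fixup() to each piece.  The range must be fully mapped: a hole
 * between vmas makes the call fail with -ENOMEM.  CAP_IPC_LOCK is required
 * for locking and unlocking alike.
 */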
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	merge_segments(current->mm, start, end);
	return error;
}
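/*
 * The len/start adjustment in sys_mlock() and sys_munlock() below rounds
 * the byte range out to whole pages.  A worked example, assuming a 4096
 * byte page size (PAGE_MASK == ~0xfffUL):
 *
 *	start = 0x12345, len = 0x100
 *	start & ~PAGE_MASK               = 0x345   (offset into first page)
 *	(0x100 + 0x345 + 0xfff) & ~0xfff = 0x1000  (one whole page to cover)
 *	start & PAGE_MASK                = 0x12000 (page-aligned start)
 */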
asmlinkage int sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	down(&current->mm->mmap_sem);
	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if (locked > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (locked > num_physpages/2)
		goto out;

	error = do_mlock(start, len, 1);
out:
	up(&current->mm->mmap_sem);
	return error;
}
asmlinkage int sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down(&current->mm->mmap_sem);
	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up(&current->mm->mmap_sem);
	return ret;
}
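/*
 * do_mlockall() implements the mlockall() flags: MCL_FUTURE is recorded in
 * mm->def_flags so that future mappings are created VM_LOCKED, and the
 * per-vma loop sets VM_LOCKED on every existing vma when MCL_CURRENT is
 * given and clears it otherwise (which is how do_mlockall(0) implements
 * munlockall()).
 */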
static int do_mlockall(int flags)
{
	int error;
	unsigned int def_flags;
	struct vm_area_struct * vma;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;

	def_flags = 0;
	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;

	error = 0;
	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;
		error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
		if (error)
			break;
	}
	merge_segments(current->mm, 0, TASK_SIZE);
	return error;
}
asmlinkage int sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	down(&current->mm->mmap_sem);
	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (current->mm->total_vm > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (current->mm->total_vm > num_physpages/2)
		goto out;

	ret = do_mlockall(flags);
out:
	up(&current->mm->mmap_sem);
	return ret;
}
asmlinkage int sys_munlockall(void)
{
	int ret;

	down(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up(&current->mm->mmap_sem);
	return ret;
}