/*
 * (C) Copyright 1995 Linus Torvalds
 */
6 #include <linux/slab.h>
8 #include <linux/mman.h>
9 #include <linux/smp_lock.h>
11 #include <asm/uaccess.h>
12 #include <asm/pgtable.h>
14 static inline int mlock_fixup_all(struct vm_area_struct
* vma
, int newflags
)
16 vma
->vm_flags
= newflags
;
20 static inline int mlock_fixup_start(struct vm_area_struct
* vma
,
21 unsigned long end
, int newflags
)
23 struct vm_area_struct
* n
;
25 n
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
31 vma
->vm_offset
+= vma
->vm_start
- n
->vm_start
;
32 n
->vm_flags
= newflags
;
34 n
->vm_file
->f_count
++;
35 if (n
->vm_ops
&& n
->vm_ops
->open
)
37 insert_vm_struct(current
->mm
, n
);
41 static inline int mlock_fixup_end(struct vm_area_struct
* vma
,
42 unsigned long start
, int newflags
)
44 struct vm_area_struct
* n
;
46 n
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
52 n
->vm_offset
+= n
->vm_start
- vma
->vm_start
;
53 n
->vm_flags
= newflags
;
55 n
->vm_file
->f_count
++;
56 if (n
->vm_ops
&& n
->vm_ops
->open
)
58 insert_vm_struct(current
->mm
, n
);
62 static inline int mlock_fixup_middle(struct vm_area_struct
* vma
,
63 unsigned long start
, unsigned long end
, int newflags
)
65 struct vm_area_struct
* left
, * right
;
67 left
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
70 right
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
72 kmem_cache_free(vm_area_cachep
, left
);
78 vma
->vm_start
= start
;
80 right
->vm_start
= end
;
81 vma
->vm_offset
+= vma
->vm_start
- left
->vm_start
;
82 right
->vm_offset
+= right
->vm_start
- left
->vm_start
;
83 vma
->vm_flags
= newflags
;
85 vma
->vm_file
->f_count
+= 2;
87 if (vma
->vm_ops
&& vma
->vm_ops
->open
) {
88 vma
->vm_ops
->open(left
);
89 vma
->vm_ops
->open(right
);
91 insert_vm_struct(current
->mm
, left
);
92 insert_vm_struct(current
->mm
, right
);
96 static int mlock_fixup(struct vm_area_struct
* vma
,
97 unsigned long start
, unsigned long end
, unsigned int newflags
)
101 if (newflags
== vma
->vm_flags
)
104 if (start
== vma
->vm_start
) {
105 if (end
== vma
->vm_end
)
106 retval
= mlock_fixup_all(vma
, newflags
);
108 retval
= mlock_fixup_start(vma
, end
, newflags
);
110 if (end
== vma
->vm_end
)
111 retval
= mlock_fixup_end(vma
, start
, newflags
);
113 retval
= mlock_fixup_middle(vma
, start
, end
, newflags
);
116 /* keep track of amount of locked VM */
117 pages
= (end
- start
) >> PAGE_SHIFT
;
118 if (!(newflags
& VM_LOCKED
))
120 vma
->vm_mm
->locked_vm
+= pages
;
121 make_pages_present(start
, end
);
126 static int do_mlock(unsigned long start
, size_t len
, int on
)
128 unsigned long nstart
, end
, tmp
;
129 struct vm_area_struct
* vma
, * next
;
132 if (!capable(CAP_IPC_LOCK
))
134 len
= (len
+ ~PAGE_MASK
) & PAGE_MASK
;
140 vma
= find_vma(current
->mm
, start
);
141 if (!vma
|| vma
->vm_start
> start
)
144 for (nstart
= start
; ; ) {
145 unsigned int newflags
;
147 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
149 newflags
= vma
->vm_flags
| VM_LOCKED
;
151 newflags
&= ~VM_LOCKED
;
153 if (vma
->vm_end
>= end
) {
154 error
= mlock_fixup(vma
, nstart
, end
, newflags
);
160 error
= mlock_fixup(vma
, nstart
, tmp
, newflags
);
165 if (!vma
|| vma
->vm_start
!= nstart
) {
170 merge_segments(current
->mm
, start
, end
);
174 asmlinkage
int sys_mlock(unsigned long start
, size_t len
)
176 unsigned long locked
;
177 unsigned long lock_limit
;
180 down(¤t
->mm
->mmap_sem
);
182 len
= (len
+ (start
& ~PAGE_MASK
) + ~PAGE_MASK
) & PAGE_MASK
;
185 locked
= len
>> PAGE_SHIFT
;
186 locked
+= current
->mm
->locked_vm
;
188 lock_limit
= current
->rlim
[RLIMIT_MEMLOCK
].rlim_cur
;
189 lock_limit
>>= PAGE_SHIFT
;
191 /* check against resource limits */
192 if (locked
> lock_limit
)
195 /* we may lock at most half of physical memory... */
196 /* (this check is pretty bogus, but doesn't hurt) */
197 if (locked
> num_physpages
/2)
200 error
= do_mlock(start
, len
, 1);
203 up(¤t
->mm
->mmap_sem
);
207 asmlinkage
int sys_munlock(unsigned long start
, size_t len
)
211 down(¤t
->mm
->mmap_sem
);
213 len
= (len
+ (start
& ~PAGE_MASK
) + ~PAGE_MASK
) & PAGE_MASK
;
215 ret
= do_mlock(start
, len
, 0);
217 up(¤t
->mm
->mmap_sem
);
221 static int do_mlockall(int flags
)
224 unsigned int def_flags
;
225 struct vm_area_struct
* vma
;
227 if (!capable(CAP_IPC_LOCK
))
231 if (flags
& MCL_FUTURE
)
232 def_flags
= VM_LOCKED
;
233 current
->mm
->def_flags
= def_flags
;
236 for (vma
= current
->mm
->mmap
; vma
; vma
= vma
->vm_next
) {
237 unsigned int newflags
;
239 newflags
= vma
->vm_flags
| VM_LOCKED
;
240 if (!(flags
& MCL_CURRENT
))
241 newflags
&= ~VM_LOCKED
;
242 error
= mlock_fixup(vma
, vma
->vm_start
, vma
->vm_end
, newflags
);
246 merge_segments(current
->mm
, 0, TASK_SIZE
);
250 asmlinkage
int sys_mlockall(int flags
)
252 unsigned long lock_limit
;
255 down(¤t
->mm
->mmap_sem
);
257 if (!flags
|| (flags
& ~(MCL_CURRENT
| MCL_FUTURE
)))
260 lock_limit
= current
->rlim
[RLIMIT_MEMLOCK
].rlim_cur
;
261 lock_limit
>>= PAGE_SHIFT
;
264 if (current
->mm
->total_vm
> lock_limit
)
267 /* we may lock at most half of physical memory... */
268 /* (this check is pretty bogus, but doesn't hurt) */
269 if (current
->mm
->total_vm
> num_physpages
/2)
272 ret
= do_mlockall(flags
);
275 up(¤t
->mm
->mmap_sem
);
279 asmlinkage
int sys_munlockall(void)
283 down(¤t
->mm
->mmap_sem
);
285 ret
= do_mlockall(0);
287 up(¤t
->mm
->mmap_sem
);